comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Yes we have
/**
 * Exercises every sync CRUD overload (create/upsert/read/query/replace/delete) against the
 * encryption-enabled container and validates that each response decrypts back to the input.
 * Items created: [0] createItem(item), [1] createItem(item, options),
 * [2] upsertItem(item), [3] upsertItem(item, options).
 */
public void crudOnDifferentOverload() {
    List<EncryptionPojo> actualProperties = new ArrayList<>();

    // createItem(item) overload
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties);
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    actualProperties.add(properties);

    // createItem(item, options) overload
    properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse1 = this.cosmosEncryptionContainer.createItem(properties,
        new CosmosItemRequestOptions());
    assertThat(itemResponse1.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem1 = itemResponse1.getItem();
    validateResponse(properties, responseItem1);
    actualProperties.add(properties);

    // upsertItem(item) overload
    properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> upsertResponse1 = this.cosmosEncryptionContainer.upsertItem(properties);
    assertThat(upsertResponse1.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem2 = upsertResponse1.getItem();
    validateResponse(properties, responseItem2);
    actualProperties.add(properties);

    // upsertItem(item, options) overload
    properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> upsertResponse2 = this.cosmosEncryptionContainer.upsertItem(properties,
        new CosmosItemRequestOptions());
    assertThat(upsertResponse2.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem3 = upsertResponse2.getItem();
    validateResponse(properties, responseItem3);
    actualProperties.add(properties);

    // readItem(id, pk, type) overload
    EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(actualProperties.get(0).getId(),
        new PartitionKey(actualProperties.get(0).getMypk()), EncryptionPojo.class).getItem();
    validateResponse(actualProperties.get(0), readItem);

    // queryItems(String, options, type) overload; the query targets the second created item.
    String query = String.format("SELECT * from c where c.id = '%s'", actualProperties.get(1).getId());
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedIterable<EncryptionPojo> feedResponseIterator =
        this.cosmosEncryptionContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = new ArrayList<>();
    feedResponseIterator.iterator().forEachRemaining(feedResponse::add);
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        // BUGFIX: the query filters on actualProperties.get(1), but the original compared against
        // 'properties' (by then reassigned to the fourth item) and validated against responseItem
        // (the first item's response), so this validation never executed.
        if (pojo.getId().equals(actualProperties.get(1).getId())) {
            EncryptionAsyncApiCrudTest.validateResponse(pojo, responseItem1);
        }
    }

    // queryItems(SqlQuerySpec, options, type) overload for the same query.
    CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    CosmosPagedIterable<EncryptionPojo> feedResponseIterator2 =
        this.cosmosEncryptionContainer.queryItems(querySpec, cosmosQueryRequestOptions1, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse2 = new ArrayList<>();
    feedResponseIterator2.iterator().forEachRemaining(feedResponse2::add);
    assertThat(feedResponse2.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse2) {
        // BUGFIX: same dead-validation issue as the first query loop (see above).
        if (pojo.getId().equals(actualProperties.get(1).getId())) {
            EncryptionAsyncApiCrudTest.validateResponse(pojo, responseItem1);
        }
    }

    // replaceItem overload on the third item.
    CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
    CosmosItemResponse<EncryptionPojo> replaceResponse =
        this.cosmosEncryptionContainer.replaceItem(actualProperties.get(2), actualProperties.get(2).getId(),
            new PartitionKey(actualProperties.get(2).getMypk()), requestOptions);
    // BUGFIX: the original asserted upsertResponse1's charge here (copy-paste); the replace
    // response is what this step must validate.
    assertThat(replaceResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = replaceResponse.getItem();
    validateResponse(actualProperties.get(2), responseItem);

    // deleteItem(id, pk, options), deleteItem(item, options) and deleteAllItemsByPartitionKey overloads.
    CosmosItemResponse<?> deleteResponse1 = this.cosmosEncryptionContainer.deleteItem(actualProperties.get(1).getId(),
        new PartitionKey(actualProperties.get(1).getMypk()), new CosmosItemRequestOptions());
    assertThat(deleteResponse1.getStatusCode()).isEqualTo(204);
    CosmosItemResponse<?> deleteResponse2 = this.cosmosEncryptionContainer.deleteItem(actualProperties.get(2),
        new CosmosItemRequestOptions());
    assertThat(deleteResponse2.getStatusCode()).isEqualTo(204);
    CosmosItemResponse<?> deleteResponse3 = this.cosmosEncryptionContainer.deleteAllItemsByPartitionKey(
        new PartitionKey(actualProperties.get(3).getMypk()), new CosmosItemRequestOptions());
    assertThat(deleteResponse3.getStatusCode()).isEqualTo(200);
}
EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(actualProperties.get(0).getId(),
/**
 * Exercises every sync CRUD overload (create/upsert/read/query/replace/delete) against the
 * encryption-enabled container and validates that each response decrypts back to the input.
 * Items created: [0] createItem(item), [1] createItem(item, options),
 * [2] upsertItem(item), [3] upsertItem(item, options).
 */
public void crudOnDifferentOverload() {
    List<EncryptionPojo> actualProperties = new ArrayList<>();

    // createItem(item) overload
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties);
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    actualProperties.add(properties);

    // createItem(item, options) overload
    properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse1 = this.cosmosEncryptionContainer.createItem(properties,
        new CosmosItemRequestOptions());
    assertThat(itemResponse1.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem1 = itemResponse1.getItem();
    validateResponse(properties, responseItem1);
    actualProperties.add(properties);

    // upsertItem(item) overload
    properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> upsertResponse1 = this.cosmosEncryptionContainer.upsertItem(properties);
    assertThat(upsertResponse1.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem2 = upsertResponse1.getItem();
    validateResponse(properties, responseItem2);
    actualProperties.add(properties);

    // upsertItem(item, options) overload
    properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> upsertResponse2 = this.cosmosEncryptionContainer.upsertItem(properties,
        new CosmosItemRequestOptions());
    assertThat(upsertResponse2.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem3 = upsertResponse2.getItem();
    validateResponse(properties, responseItem3);
    actualProperties.add(properties);

    // readItem(id, pk, type) overload
    EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(actualProperties.get(0).getId(),
        new PartitionKey(actualProperties.get(0).getMypk()), EncryptionPojo.class).getItem();
    validateResponse(actualProperties.get(0), readItem);

    // queryItems(String, options, type) overload; the query targets the second created item.
    String query = String.format("SELECT * from c where c.id = '%s'", actualProperties.get(1).getId());
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedIterable<EncryptionPojo> feedResponseIterator =
        this.cosmosEncryptionContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = new ArrayList<>();
    feedResponseIterator.iterator().forEachRemaining(feedResponse::add);
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        // BUGFIX: the query filters on actualProperties.get(1), but the original compared against
        // 'properties' (by then reassigned to the fourth item) and validated against responseItem
        // (the first item's response), so this validation never executed.
        if (pojo.getId().equals(actualProperties.get(1).getId())) {
            EncryptionAsyncApiCrudTest.validateResponse(pojo, responseItem1);
        }
    }

    // queryItems(SqlQuerySpec, options, type) overload for the same query.
    CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    CosmosPagedIterable<EncryptionPojo> feedResponseIterator2 =
        this.cosmosEncryptionContainer.queryItems(querySpec, cosmosQueryRequestOptions1, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse2 = new ArrayList<>();
    feedResponseIterator2.iterator().forEachRemaining(feedResponse2::add);
    assertThat(feedResponse2.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse2) {
        // BUGFIX: same dead-validation issue as the first query loop (see above).
        if (pojo.getId().equals(actualProperties.get(1).getId())) {
            EncryptionAsyncApiCrudTest.validateResponse(pojo, responseItem1);
        }
    }

    // replaceItem overload on the third item.
    CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
    CosmosItemResponse<EncryptionPojo> replaceResponse =
        this.cosmosEncryptionContainer.replaceItem(actualProperties.get(2), actualProperties.get(2).getId(),
            new PartitionKey(actualProperties.get(2).getMypk()), requestOptions);
    // BUGFIX: the original asserted upsertResponse1's charge here (copy-paste); the replace
    // response is what this step must validate.
    assertThat(replaceResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = replaceResponse.getItem();
    validateResponse(actualProperties.get(2), responseItem);

    // deleteItem(id, pk, options), deleteItem(item, options) and deleteAllItemsByPartitionKey overloads.
    CosmosItemResponse<?> deleteResponse1 = this.cosmosEncryptionContainer.deleteItem(actualProperties.get(1).getId(),
        new PartitionKey(actualProperties.get(1).getMypk()), new CosmosItemRequestOptions());
    assertThat(deleteResponse1.getStatusCode()).isEqualTo(204);
    CosmosItemResponse<?> deleteResponse2 = this.cosmosEncryptionContainer.deleteItem(actualProperties.get(2),
        new CosmosItemRequestOptions());
    assertThat(deleteResponse2.getStatusCode()).isEqualTo(204);
    CosmosItemResponse<?> deleteResponse3 = this.cosmosEncryptionContainer.deleteAllItemsByPartitionKey(
        new PartitionKey(actualProperties.get(3).getMypk()), new CosmosItemRequestOptions());
    assertThat(deleteResponse3.getStatusCode()).isEqualTo(200);
}
class EncryptionSyncApiCrudTest extends TestSuiteBase { private CosmosClient client; private CosmosEncryptionClient cosmosEncryptionClient; private CosmosEncryptionContainer cosmosEncryptionContainer; @Factory(dataProvider = "clientBuildersWithSessionConsistency") public EncryptionSyncApiCrudTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT) public void before_CosmosItemTest() { assertThat(this.client).isNull(); this.client = getClientBuilder().buildClient(); EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider(); this.cosmosEncryptionClient = CosmosEncryptionClient.createCosmosEncryptionClient(this.client, encryptionKeyStoreProvider); this.cosmosEncryptionContainer = getSharedSyncEncryptionContainer(this.cosmosEncryptionClient); } @AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { assertThat(this.client).isNotNull(); this.client.close(); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void createItemEncrypt_readItemDecrypt() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions(), EncryptionPojo.class).getItem(); validateResponse(properties, readItem); properties = getItem(UUID.randomUUID().toString()); String longString = ""; for (int i = 0; i < 10000; i++) { longString += "a"; } properties.setSensitiveString(longString); 
itemResponse = cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void upsertItem_readItem() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.upsertItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions(), EncryptionPojo.class).getItem(); validateResponse(properties, readItem); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItems() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); String query = String.format("SELECT * from c where c.id = '%s'", properties.getId()); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); SqlQuerySpec querySpec = new SqlQuerySpec(query); CosmosPagedIterable<EncryptionPojo> feedResponseIterator = this.cosmosEncryptionContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class); List<EncryptionPojo> feedResponse = new ArrayList<>(); feedResponseIterator.iterator().forEachRemaining(pojo -> { 
feedResponse.add(pojo); }); assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1); for (EncryptionPojo pojo : feedResponse) { if (pojo.getId().equals(properties.getId())) { validateResponse(pojo, responseItem); } } } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItemsOnEncryptedProperties() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" + " " + "@nonSensitive and c.sensitiveLong = @sensitiveLong"); SqlQuerySpec querySpec = new SqlQuerySpec(query); SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive()); querySpec.getParameters().add(parameter1); SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString()); SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong()); SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<EncryptionPojo> feedResponseIterator = this.cosmosEncryptionContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption, cosmosQueryRequestOptions, EncryptionPojo.class); List<EncryptionPojo> feedResponse = new ArrayList<>(); feedResponseIterator.iterator().forEachRemaining(pojo -> { feedResponse.add(pojo); }); 
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1); for (EncryptionPojo pojo : feedResponse) { if (pojo.getId().equals(properties.getId())) { validateResponse(pojo, responseItem); } } } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItemsOnRandomizedEncryption() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" + " " + "@nonSensitive and c.sensitiveDouble = @sensitiveDouble"); SqlQuerySpec querySpec = new SqlQuerySpec(query); SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive()); querySpec.getParameters().add(parameter1); SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString()); SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble()); SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<EncryptionPojo> feedResponseIterator = this.cosmosEncryptionContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption, cosmosQueryRequestOptions, EncryptionPojo.class); try { List<EncryptionPojo> feedResponse = new ArrayList<>(); feedResponseIterator.iterator().forEachRemaining(pojo -> { feedResponse.add(pojo); }); fail("Query on randomized parameter should 
fail"); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " + "query because of randomized encryption"); } } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItemsWithContinuationTokenAndPageSize() throws Exception { List<String> actualIds = new ArrayList<>(); EncryptionPojo properties = getItem(UUID.randomUUID().toString()); this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); actualIds.add(properties.getId()); properties = getItem(UUID.randomUUID().toString()); this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); actualIds.add(properties.getId()); properties = getItem(UUID.randomUUID().toString()); this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); actualIds.add(properties.getId()); String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0), actualIds.get(1), actualIds.get(2)); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); String continuationToken = null; int pageSize = 1; int initialDocumentCount = 3; int finalDocumentCount = 0; CosmosPagedIterable<EncryptionPojo> pojoCosmosPagedIterable = this.cosmosEncryptionContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class); do { Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable = pojoCosmosPagedIterable.iterableByPage(continuationToken, 1); for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) { int resultSize = fr.getResults().size(); assertThat(resultSize).isEqualTo(pageSize); finalDocumentCount += fr.getResults().size(); continuationToken = fr.getContinuationToken(); } } while (continuationToken != null); assertThat(finalDocumentCount).isEqualTo(initialDocumentCount); } @Test(groups = 
{"encryption"}, timeOut = TIMEOUT) @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void batchExecution() { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); EncryptionPojo replacePojo = getItem(itemId); replacePojo.setSensitiveString("ReplacedSensitiveString"); CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId)); cosmosBatch.createItemOperation(createPojo); cosmosBatch.replaceItemOperation(itemId, replacePojo); cosmosBatch.upsertItemOperation(createPojo); cosmosBatch.readItemOperation(itemId); cosmosBatch.deleteItemOperation(itemId); CosmosBatchResponse batchResponse = this.cosmosEncryptionContainer.executeCosmosBatch(cosmosBatch); assertThat(batchResponse.getResults().size()).isEqualTo(5); assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo); validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void batchExecutionWithOptionsApi() { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); EncryptionPojo replacePojo = getItem(itemId); replacePojo.setSensitiveString("ReplacedSensitiveString"); CosmosBatch 
cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId)); CosmosBatchItemRequestOptions cosmosBatchItemRequestOptions = new CosmosBatchItemRequestOptions(); cosmosBatch.createItemOperation(createPojo, cosmosBatchItemRequestOptions); cosmosBatch.replaceItemOperation(itemId, replacePojo, cosmosBatchItemRequestOptions); cosmosBatch.upsertItemOperation(createPojo, cosmosBatchItemRequestOptions); cosmosBatch.readItemOperation(itemId, cosmosBatchItemRequestOptions); cosmosBatch.deleteItemOperation(itemId, cosmosBatchItemRequestOptions); CosmosBatchResponse batchResponse = this.cosmosEncryptionContainer.executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions()); assertThat(batchResponse.getResults().size()).isEqualTo(5); assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo); validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void patchItem() { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionContainer.createItem(createPojo, new PartitionKey(createPojo.getMypk()), new CosmosItemRequestOptions()); int 
originalSensitiveInt = createPojo.getSensitiveInt(); int newSensitiveInt = originalSensitiveInt + 1; String itemIdToReplace = UUID.randomUUID().toString(); EncryptionPojo nestedEncryptionPojoToReplace = getItem(itemIdToReplace); nestedEncryptionPojoToReplace.setSensitiveString("testing"); CosmosPatchOperations cosmosPatchOperations = CosmosPatchOperations.create(); cosmosPatchOperations.add("/sensitiveString", "patched"); cosmosPatchOperations.remove("/sensitiveDouble"); cosmosPatchOperations.replace("/sensitiveInt", newSensitiveInt); cosmosPatchOperations.replace("/sensitiveNestedPojo", nestedEncryptionPojoToReplace); cosmosPatchOperations.set("/sensitiveBoolean", false); CosmosPatchItemRequestOptions options = new CosmosPatchItemRequestOptions(); CosmosItemResponse<EncryptionPojo> response = this.cosmosEncryptionContainer.patchItem( createPojo.getId(), new PartitionKey(createPojo.getMypk()), cosmosPatchOperations, options, EncryptionPojo.class); assertThat(response.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); EncryptionPojo patchedItem = response.getItem(); assertThat(patchedItem).isNotNull(); assertThat(patchedItem.getSensitiveString()).isEqualTo("patched"); assertThat(patchedItem.getSensitiveDouble()).isNull(); assertThat(patchedItem.getSensitiveNestedPojo()).isNotNull(); assertThat(patchedItem.getSensitiveInt()).isEqualTo(newSensitiveInt); assertThat(patchedItem.isSensitiveBoolean()).isEqualTo(false); response = this.cosmosEncryptionContainer.readItem( createPojo.getId(), new PartitionKey(createPojo.getMypk()), options, EncryptionPojo.class); assertThat(response.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); validateResponse(patchedItem, response.getItem()); } private int getTotalRequest() { int countRequest = new Random().nextInt(100) + 120; logger.info("Total count of request for this test case: " + countRequest); return countRequest; } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_createItem() { int 
totalRequest = getTotalRequest(); Map<String, EncryptionPojo> idToItemMap = new HashMap<>(); List<CosmosItemOperation> cosmosItemOperationsList = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); idToItemMap.put(itemId, createPojo); cosmosItemOperationsList.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer. executeBulkOperations(cosmosItemOperationsList)); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class); validateResponse(item, idToItemMap.get(item.getId())); } assertThat(processedDoc.get()).isEqualTo(totalRequest); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_upsertItem() { int totalRequest = getTotalRequest(); Map<String, EncryptionPojo> idToItemMap = new HashMap<>(); List<CosmosItemOperation> 
cosmosItemOperationsList = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); idToItemMap.put(itemId, createPojo); cosmosItemOperationsList.add(CosmosBulkOperations.getUpsertItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer. executeBulkOperations(cosmosItemOperationsList, new CosmosBulkExecutionOptions())); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class); validateResponse(item, idToItemMap.get(item.getId())); } ; assertThat(processedDoc.get()).isEqualTo(totalRequest); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_deleteItem() { int totalRequest = Math.min(getTotalRequest(), 20); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String itemId = UUID.randomUUID().toString(); EncryptionPojo 
createPojo = getItem(itemId); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } createItemsAndVerify(cosmosItemOperations); List<CosmosItemOperation> deleteCosmosItemOperations = new ArrayList<>(); for (CosmosItemOperation cosmosItemOperation : cosmosItemOperations) { EncryptionPojo encryptionPojo = cosmosItemOperation.getItem(); deleteCosmosItemOperations.add(CosmosBulkOperations.getDeleteItemOperation(encryptionPojo.getId(), cosmosItemOperation.getPartitionKeyValue())); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer .executeBulkOperations(deleteCosmosItemOperations)); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } ; assertThat(processedDoc.get()).isEqualTo(totalRequest); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_readItem() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); Map<String, EncryptionPojo> idToItemMap = new HashMap<>(); for (int i = 
0; i < totalRequest; i++) { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); idToItemMap.put(itemId, createPojo); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } createItemsAndVerify(cosmosItemOperations); List<CosmosItemOperation> readCosmosItemOperations = new ArrayList<>(); for (CosmosItemOperation cosmosItemOperation : cosmosItemOperations) { EncryptionPojo encryptionPojo = cosmosItemOperation.getItem(); readCosmosItemOperations.add(CosmosBulkOperations.getReadItemOperation(encryptionPojo.getId(), cosmosItemOperation.getPartitionKeyValue())); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer .executeBulkOperations(readCosmosItemOperations)); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class); validateResponse(item, idToItemMap.get(item.getId())); } ; assertThat(processedDoc.get()).isEqualTo(totalRequest); } private void 
createItemsAndVerify(List<CosmosItemOperation> cosmosItemOperations) { List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> createResponseFlux = Lists.newArrayList(this.cosmosEncryptionContainer. executeBulkOperations(cosmosItemOperations)); Set<String> distinctIndex = new HashSet<>(); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : createResponseFlux) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo encryptionPojo = cosmosBulkItemResponse.getItem(EncryptionPojo.class); distinctIndex.add(encryptionPojo.getId()); } ; assertThat(processedDoc.get()).isEqualTo(cosmosItemOperations.size()); assertThat(distinctIndex.size()).isEqualTo(cosmosItemOperations.size()); } }
class EncryptionSyncApiCrudTest extends TestSuiteBase { private CosmosClient client; private CosmosEncryptionClient cosmosEncryptionClient; private CosmosEncryptionContainer cosmosEncryptionContainer; @Factory(dataProvider = "clientBuildersWithSessionConsistency") public EncryptionSyncApiCrudTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT) public void before_CosmosItemTest() { assertThat(this.client).isNull(); this.client = getClientBuilder().buildClient(); EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider(); this.cosmosEncryptionClient = CosmosEncryptionClient.createCosmosEncryptionClient(this.client, encryptionKeyStoreProvider); this.cosmosEncryptionContainer = getSharedSyncEncryptionContainer(this.cosmosEncryptionClient); } @AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { assertThat(this.client).isNotNull(); this.client.close(); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void createItemEncrypt_readItemDecrypt() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions(), EncryptionPojo.class).getItem(); validateResponse(properties, readItem); properties = getItem(UUID.randomUUID().toString()); String longString = ""; for (int i = 0; i < 10000; i++) { longString += "a"; } properties.setSensitiveString(longString); 
itemResponse = cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void upsertItem_readItem() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.upsertItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions(), EncryptionPojo.class).getItem(); validateResponse(properties, readItem); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItems() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); String query = String.format("SELECT * from c where c.id = '%s'", properties.getId()); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); SqlQuerySpec querySpec = new SqlQuerySpec(query); CosmosPagedIterable<EncryptionPojo> feedResponseIterator = this.cosmosEncryptionContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class); List<EncryptionPojo> feedResponse = new ArrayList<>(); feedResponseIterator.iterator().forEachRemaining(pojo -> { 
feedResponse.add(pojo); }); assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1); for (EncryptionPojo pojo : feedResponse) { if (pojo.getId().equals(properties.getId())) { validateResponse(pojo, responseItem); } } } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItemsOnEncryptedProperties() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" + " " + "@nonSensitive and c.sensitiveLong = @sensitiveLong"); SqlQuerySpec querySpec = new SqlQuerySpec(query); SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive()); querySpec.getParameters().add(parameter1); SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString()); SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong()); SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<EncryptionPojo> feedResponseIterator = this.cosmosEncryptionContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption, cosmosQueryRequestOptions, EncryptionPojo.class); List<EncryptionPojo> feedResponse = new ArrayList<>(); feedResponseIterator.iterator().forEachRemaining(pojo -> { feedResponse.add(pojo); }); 
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1); for (EncryptionPojo pojo : feedResponse) { if (pojo.getId().equals(properties.getId())) { validateResponse(pojo, responseItem); } } } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItemsOnRandomizedEncryption() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" + " " + "@nonSensitive and c.sensitiveDouble = @sensitiveDouble"); SqlQuerySpec querySpec = new SqlQuerySpec(query); SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive()); querySpec.getParameters().add(parameter1); SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString()); SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble()); SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<EncryptionPojo> feedResponseIterator = this.cosmosEncryptionContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption, cosmosQueryRequestOptions, EncryptionPojo.class); try { List<EncryptionPojo> feedResponse = new ArrayList<>(); feedResponseIterator.iterator().forEachRemaining(pojo -> { feedResponse.add(pojo); }); fail("Query on randomized parameter should 
fail"); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " + "query because of randomized encryption"); } } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItemsWithContinuationTokenAndPageSize() throws Exception { List<String> actualIds = new ArrayList<>(); EncryptionPojo properties = getItem(UUID.randomUUID().toString()); this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); actualIds.add(properties.getId()); properties = getItem(UUID.randomUUID().toString()); this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); actualIds.add(properties.getId()); properties = getItem(UUID.randomUUID().toString()); this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); actualIds.add(properties.getId()); String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0), actualIds.get(1), actualIds.get(2)); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); String continuationToken = null; int pageSize = 1; int initialDocumentCount = 3; int finalDocumentCount = 0; CosmosPagedIterable<EncryptionPojo> pojoCosmosPagedIterable = this.cosmosEncryptionContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class); do { Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable = pojoCosmosPagedIterable.iterableByPage(continuationToken, 1); for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) { int resultSize = fr.getResults().size(); assertThat(resultSize).isEqualTo(pageSize); finalDocumentCount += fr.getResults().size(); continuationToken = fr.getContinuationToken(); } } while (continuationToken != null); assertThat(finalDocumentCount).isEqualTo(initialDocumentCount); } @Test(groups = 
{"encryption"}, timeOut = TIMEOUT) @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void batchExecution() { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); EncryptionPojo replacePojo = getItem(itemId); replacePojo.setSensitiveString("ReplacedSensitiveString"); CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId)); cosmosBatch.createItemOperation(createPojo); cosmosBatch.replaceItemOperation(itemId, replacePojo); cosmosBatch.upsertItemOperation(createPojo); cosmosBatch.readItemOperation(itemId); cosmosBatch.deleteItemOperation(itemId); CosmosBatchResponse batchResponse = this.cosmosEncryptionContainer.executeCosmosBatch(cosmosBatch); assertThat(batchResponse.getResults().size()).isEqualTo(5); assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo); validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void batchExecutionWithOptionsApi() { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); EncryptionPojo replacePojo = getItem(itemId); replacePojo.setSensitiveString("ReplacedSensitiveString"); CosmosBatch 
cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId)); CosmosBatchItemRequestOptions cosmosBatchItemRequestOptions = new CosmosBatchItemRequestOptions(); cosmosBatch.createItemOperation(createPojo, cosmosBatchItemRequestOptions); cosmosBatch.replaceItemOperation(itemId, replacePojo, cosmosBatchItemRequestOptions); cosmosBatch.upsertItemOperation(createPojo, cosmosBatchItemRequestOptions); cosmosBatch.readItemOperation(itemId, cosmosBatchItemRequestOptions); cosmosBatch.deleteItemOperation(itemId, cosmosBatchItemRequestOptions); CosmosBatchResponse batchResponse = this.cosmosEncryptionContainer.executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions()); assertThat(batchResponse.getResults().size()).isEqualTo(5); assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo); validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void patchItem() { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionContainer.createItem(createPojo, new PartitionKey(createPojo.getMypk()), new CosmosItemRequestOptions()); int 
originalSensitiveInt = createPojo.getSensitiveInt(); int newSensitiveInt = originalSensitiveInt + 1; String itemIdToReplace = UUID.randomUUID().toString(); EncryptionPojo nestedEncryptionPojoToReplace = getItem(itemIdToReplace); nestedEncryptionPojoToReplace.setSensitiveString("testing"); CosmosPatchOperations cosmosPatchOperations = CosmosPatchOperations.create(); cosmosPatchOperations.add("/sensitiveString", "patched"); cosmosPatchOperations.remove("/sensitiveDouble"); cosmosPatchOperations.replace("/sensitiveInt", newSensitiveInt); cosmosPatchOperations.replace("/sensitiveNestedPojo", nestedEncryptionPojoToReplace); cosmosPatchOperations.set("/sensitiveBoolean", false); CosmosPatchItemRequestOptions options = new CosmosPatchItemRequestOptions(); CosmosItemResponse<EncryptionPojo> response = this.cosmosEncryptionContainer.patchItem( createPojo.getId(), new PartitionKey(createPojo.getMypk()), cosmosPatchOperations, options, EncryptionPojo.class); assertThat(response.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); EncryptionPojo patchedItem = response.getItem(); assertThat(patchedItem).isNotNull(); assertThat(patchedItem.getSensitiveString()).isEqualTo("patched"); assertThat(patchedItem.getSensitiveDouble()).isNull(); assertThat(patchedItem.getSensitiveNestedPojo()).isNotNull(); assertThat(patchedItem.getSensitiveInt()).isEqualTo(newSensitiveInt); assertThat(patchedItem.isSensitiveBoolean()).isEqualTo(false); response = this.cosmosEncryptionContainer.readItem( createPojo.getId(), new PartitionKey(createPojo.getMypk()), options, EncryptionPojo.class); assertThat(response.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); validateResponse(patchedItem, response.getItem()); } private int getTotalRequest() { int countRequest = new Random().nextInt(100) + 120; logger.info("Total count of request for this test case: " + countRequest); return countRequest; } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_createItem() { int 
totalRequest = getTotalRequest(); Map<String, EncryptionPojo> idToItemMap = new HashMap<>(); List<CosmosItemOperation> cosmosItemOperationsList = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); idToItemMap.put(itemId, createPojo); cosmosItemOperationsList.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer. executeBulkOperations(cosmosItemOperationsList)); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class); validateResponse(item, idToItemMap.get(item.getId())); } assertThat(processedDoc.get()).isEqualTo(totalRequest); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_upsertItem() { int totalRequest = getTotalRequest(); Map<String, EncryptionPojo> idToItemMap = new HashMap<>(); List<CosmosItemOperation> 
cosmosItemOperationsList = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); idToItemMap.put(itemId, createPojo); cosmosItemOperationsList.add(CosmosBulkOperations.getUpsertItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer. executeBulkOperations(cosmosItemOperationsList, new CosmosBulkExecutionOptions())); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class); validateResponse(item, idToItemMap.get(item.getId())); } ; assertThat(processedDoc.get()).isEqualTo(totalRequest); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_deleteItem() { int totalRequest = Math.min(getTotalRequest(), 20); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String itemId = UUID.randomUUID().toString(); EncryptionPojo 
createPojo = getItem(itemId); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } createItemsAndVerify(cosmosItemOperations); List<CosmosItemOperation> deleteCosmosItemOperations = new ArrayList<>(); for (CosmosItemOperation cosmosItemOperation : cosmosItemOperations) { EncryptionPojo encryptionPojo = cosmosItemOperation.getItem(); deleteCosmosItemOperations.add(CosmosBulkOperations.getDeleteItemOperation(encryptionPojo.getId(), cosmosItemOperation.getPartitionKeyValue())); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer .executeBulkOperations(deleteCosmosItemOperations)); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } ; assertThat(processedDoc.get()).isEqualTo(totalRequest); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_readItem() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); Map<String, EncryptionPojo> idToItemMap = new HashMap<>(); for (int i = 
0; i < totalRequest; i++) { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); idToItemMap.put(itemId, createPojo); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } createItemsAndVerify(cosmosItemOperations); List<CosmosItemOperation> readCosmosItemOperations = new ArrayList<>(); for (CosmosItemOperation cosmosItemOperation : cosmosItemOperations) { EncryptionPojo encryptionPojo = cosmosItemOperation.getItem(); readCosmosItemOperations.add(CosmosBulkOperations.getReadItemOperation(encryptionPojo.getId(), cosmosItemOperation.getPartitionKeyValue())); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer .executeBulkOperations(readCosmosItemOperations)); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class); validateResponse(item, idToItemMap.get(item.getId())); } ; assertThat(processedDoc.get()).isEqualTo(totalRequest); } private void 
createItemsAndVerify(List<CosmosItemOperation> cosmosItemOperations) { List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> createResponseFlux = Lists.newArrayList(this.cosmosEncryptionContainer. executeBulkOperations(cosmosItemOperations)); Set<String> distinctIndex = new HashSet<>(); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : createResponseFlux) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo encryptionPojo = cosmosBulkItemResponse.getItem(EncryptionPojo.class); distinctIndex.add(encryptionPojo.getId()); } ; assertThat(processedDoc.get()).isEqualTo(cosmosItemOperations.size()); assertThat(distinctIndex.size()).isEqualTo(cosmosItemOperations.size()); } }
Why did we remove the beta tag here?
/**
 * Executes the given transactional batch with default request options.
 *
 * <p>Convenience overload that delegates to
 * {@code executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions())}.
 *
 * @param cosmosBatch the batch of operations to execute.
 * @return a {@link Mono} that emits the {@link CosmosBatchResponse} of the execution.
 */
public Mono<CosmosBatchResponse> executeCosmosBatch(CosmosBatch cosmosBatch) { return this.executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions()); }
}
/**
 * Executes the given transactional batch with default request options.
 *
 * <p>Convenience overload that delegates to
 * {@code executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions())}.
 *
 * @param cosmosBatch the batch of operations to execute.
 * @return a {@link Mono} that emits the {@link CosmosBatchResponse} of the execution.
 */
public Mono<CosmosBatchResponse> executeCosmosBatch(CosmosBatch cosmosBatch) { return this.executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions()); }
class type. * @return a {@link CosmosPagedFlux}
class type. * @return a {@link CosmosPagedFlux}
Oh, is that because the batch API has been GA'd?
/**
 * Executes the given transactional batch with default request options.
 *
 * <p>Convenience overload that delegates to
 * {@code executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions())}.
 *
 * @param cosmosBatch the batch of operations to execute.
 * @return a {@link Mono} that emits the {@link CosmosBatchResponse} of the execution.
 */
public Mono<CosmosBatchResponse> executeCosmosBatch(CosmosBatch cosmosBatch) { return this.executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions()); }
}
/**
 * Executes the given transactional batch with default request options.
 *
 * <p>Convenience overload that delegates to
 * {@code executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions())}.
 *
 * @param cosmosBatch the batch of operations to execute.
 * @return a {@link Mono} that emits the {@link CosmosBatchResponse} of the execution.
 */
public Mono<CosmosBatchResponse> executeCosmosBatch(CosmosBatch cosmosBatch) { return this.executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions()); }
class type. * @return a {@link CosmosPagedFlux}
class type. * @return a {@link CosmosPagedFlux}
Yes.
/**
 * Executes the given transactional batch with default request options.
 *
 * <p>Convenience overload that delegates to
 * {@code executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions())}.
 *
 * @param cosmosBatch the batch of operations to execute.
 * @return a {@link Mono} that emits the {@link CosmosBatchResponse} of the execution.
 */
public Mono<CosmosBatchResponse> executeCosmosBatch(CosmosBatch cosmosBatch) { return this.executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions()); }
}
/**
 * Executes the given transactional batch with default request options.
 *
 * <p>Convenience overload that delegates to
 * {@code executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions())}.
 *
 * @param cosmosBatch the batch of operations to execute.
 * @return a {@link Mono} that emits the {@link CosmosBatchResponse} of the execution.
 */
public Mono<CosmosBatchResponse> executeCosmosBatch(CosmosBatch cosmosBatch) { return this.executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions()); }
class type. * @return a {@link CosmosPagedFlux}
class type. * @return a {@link CosmosPagedFlux}
Why do we need to multiply by 2 here?
/**
 * Verifies that for a cross-partition ORDER BY query the diagnostics string
 * reports the same set of partition key range ids under the
 * {@code "partitionKeyRangeId"} entries and the {@code pkrId:} entries.
 *
 * <p>The previous assertion counted raw marker occurrences and required the
 * {@code "partitionKeyRangeId"} count to be exactly twice the {@code pkrId}
 * count. That is brittle: the number of times each marker appears per
 * partition depends on the request flow (retries etc. — TODO confirm), so the
 * factor of 2 is incidental. Comparing the distinct range-id sets checks the
 * actual intent — both diagnostics sections cover the same partitions —
 * without depending on occurrence counts.
 */
public void queryDiagnosticsOnOrderBy() {
    String containerId = "testcontainer";
    cosmosAsyncDatabase.createContainer(containerId, "/mypk",
        ThroughputProperties.createManualThroughput(40000)).block();
    CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    testcontainer.createItem(getInternalObjectNode()).block();
    options.setMaxDegreeOfParallelism(-1);
    String query = "SELECT * from c ORDER BY c._ts DESC";
    CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux =
        testcontainer.queryItems(query, options, InternalObjectNode.class);
    Set<String> partitionKeyRangeIds = new HashSet<>();
    Set<String> pkRids = new HashSet<>();
    cosmosPagedFlux.byPage().flatMap(feedResponse -> {
        String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString();
        // Capture the range id digit after each marker rather than counting marker hits.
        Pattern pattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)");
        Matcher matcher = pattern.matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            partitionKeyRangeIds.add(matcher.group(2));
        }
        pattern = Pattern.compile("(pkrId:)(\\d)");
        matcher = pattern.matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            pkRids.add(matcher.group(2));
        }
        return Flux.just(feedResponse);
    }).blockLast();
    assertThat(pkRids).isNotEmpty();
    assertThat(pkRids).isEqualTo(partitionKeyRangeIds);
    deleteCollection(testcontainer);
}
assertThat(counterPkRid.get() * 2).isEqualTo(counterPartitionKeyRangeId.get());
/**
 * Cross-partition ORDER BY diagnostics check: the distinct partition key range
 * ids appearing in the {@code "partitionKeyRangeId"} entries of the query
 * diagnostics must match the distinct ids appearing in the {@code pkrId:}
 * entries, and neither set may be empty.
 */
public void queryDiagnosticsOnOrderBy() {
    String containerId = "testcontainer";
    cosmosAsyncDatabase
        .createContainer(containerId, "/mypk", ThroughputProperties.createManualThroughput(40000))
        .block();
    CosmosAsyncContainer orderByContainer = cosmosAsyncDatabase.getContainer(containerId);
    CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions();
    queryOptions.setConsistencyLevel(ConsistencyLevel.EVENTUAL);
    orderByContainer.createItem(getInternalObjectNode()).block();
    queryOptions.setMaxDegreeOfParallelism(-1);
    CosmosPagedFlux<InternalObjectNode> pagedFlux =
        orderByContainer.queryItems("SELECT * from c ORDER BY c._ts DESC", queryOptions, InternalObjectNode.class);

    Set<String> rangeIdsFromStoreResults = new HashSet<>();
    Set<String> rangeIdsFromAddressResolution = new HashSet<>();
    // Patterns are loop-invariant, so compile them once outside the page callback.
    Pattern storeResultPattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)");
    Pattern addressPattern = Pattern.compile("(pkrId:)(\\d)");
    pagedFlux.byPage().flatMap(page -> {
        String diagnostics = page.getCosmosDiagnostics().toString();
        Matcher storeMatcher = storeResultPattern.matcher(diagnostics);
        while (storeMatcher.find()) {
            rangeIdsFromStoreResults.add(storeMatcher.group(2));
        }
        Matcher addressMatcher = addressPattern.matcher(diagnostics);
        while (addressMatcher.find()) {
            rangeIdsFromAddressResolution.add(addressMatcher.group(2));
        }
        return Flux.just(page);
    }).blockLast();

    assertThat(rangeIdsFromAddressResolution).isNotEmpty();
    assertThat(rangeIdsFromAddressResolution).isEqualTo(rangeIdsFromStoreResults);
    deleteCollection(orderByContainer);
}
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosAsyncDatabase cosmosAsyncDatabase; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false 
}, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = "readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnostics() throws Exception { CosmosClient testGatewayClient = null; try { testGatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); 
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient()); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + 
Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient()); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = 
createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient()); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() throws JsonProcessingException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); 
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan 
Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); 
assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) private void validateDirectModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } private void validateGatewayModeQueryDiagnostics(String 
diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); 
validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration 
(ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = 
client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); createResponse = container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient()); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = 
client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(),null); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); 
ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void 
serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.container.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2"; testItem.mypk = "TestPk"; itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } @Test(groups = {"simple"}, timeOut = TIMEOUT) 
// Verifies that RNTBD (direct-mode) diagnostics report request/response payload
// lengths consistent with the serialized size of the test item for each CRUD shape.
public void rntbdRequestResponseLengthStatistics() throws Exception {
    TestItem testItem = new TestItem();
    testItem.id = UUID.randomUUID().toString();
    testItem.mypk = UUID.randomUUID().toString();
    // Serialized size of the item; diagnostics payload lengths are checked against it.
    int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
    CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
    // Create: request carries the item; contentResponseOnWrite echoes it back.
    CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem);
    validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse));
    try {
        // Creating the same id again must 409; the conflict response has no payload.
        container.createItem(testItem);
        fail("expected to fail due to 409");
    } catch (CosmosException e) {
        validate(e.getDiagnostics(), testItemLength, 0);
    }
    // Read: empty request payload, response carries the item.
    CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
    validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
    // Delete: no payload in either direction.
    CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
    validate(deleteItemResponse.getDiagnostics(), 0, 0);
}

@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdStatistics() throws Exception {
    Instant beforeClientInitialization = Instant.now();
    CosmosClient client1 = null;
    try {
        client1 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .directMode()
            .buildClient();

        TestItem testItem = new TestItem();
        testItem.id = UUID.randomUUID().toString();
        testItem.mypk = UUID.randomUUID().toString();
        int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
        CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());

        // The 1s sleeps separate the three operations in time so the endpoint
        // timestamps asserted in validateRntbdStatistics are distinguishable.
        Thread.sleep(1000);

        // The first operation initializes the RNTBD service endpoint.
        Instant beforeInitializingRntbdServiceEndpoint = Instant.now();
        CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem);
        Instant afterInitializingRntbdServiceEndpoint = Instant.now();

        Thread.sleep(1000);
        Instant beforeOperation2 = Instant.now();
        CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem);
        Instant afterOperation2 = Instant.now();

        Thread.sleep(1000);
        Instant beforeOperation3 = Instant.now();
        CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem);
        Instant afterOperation3 = Instant.now();

        validateRntbdStatistics(operation3Response.getDiagnostics(),
            beforeClientInitialization,
            beforeInitializingRntbdServiceEndpoint,
            afterInitializingRntbdServiceEndpoint,
            beforeOperation2,
            afterOperation2,
            beforeOperation3,
            afterOperation3);

        CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
        validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));

        CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
        validate(deleteItemResponse.getDiagnostics(), 0, 0);
    } finally {
        LifeCycleUtils.closeQuietly(client1);
    }
}

// Asserts the serviceEndpointStatistics emitted for the third operation:
// channel/request counts plus createdTime and last(-Successful)RequestTime
// windows derived from the instants captured around the operations above.
// Thresholds of a few milliseconds absorb clock granularity.
private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics,
                                     Instant clientInitializationTime,
                                     Instant beforeInitializingRntbdServiceEndpoint,
                                     Instant afterInitializingRntbdServiceEndpoint,
                                     Instant beforeOperation2,
                                     Instant afterOperation2,
                                     Instant beforeOperation3,
                                     Instant afterOperation3) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    assertThat(storeResult).isNotNull();
    // NOTE(review): hasPayload is computed but never used in this method.
    boolean hasPayload = storeResult.get("exception").isNull();
    assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0);
    assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0);

    JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics");
    assertThat(serviceEndpointStatistics).isNotNull();
    assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0);
    assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0);
    // The single in-flight request is the one whose diagnostics we are inspecting.
    assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1);
    assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false);

    // Endpoint creation must fall inside the window around operation 1.
    Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isAfterOrEqualTo(beforeInitializationThreshold);
    Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isBeforeOrEqualTo(afterInitializationThreshold);
    // The "last" request seen when operation 3's diagnostics were captured is
    // operation 2, so both timestamps must fall in operation 2's window.
    Instant afterOperation2Threshold = afterOperation2.plusMillis(2);
    Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2);
    assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText()))
        .isAfterOrEqualTo(beforeOperation2Threshold)
        .isBeforeOrEqualTo(afterOperation2Threshold);
    assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText()))
        .isAfterOrEqualTo(beforeOperation2Threshold)
        .isBeforeOrEqualTo(afterOperation2Threshold);
}

// Validates the request/response payload lengths recorded in the first
// storeResult of the diagnostics against the expected byte sizes.
private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult =
responseStatisticsList.get(0).get("storeResult"); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); 
            assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null");
            assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" +
                ".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information");

            client2 = new CosmosClientBuilder()
                .endpoint(TestConfigurations.HOST)
                .key(TestConfigurations.MASTER_KEY)
                .contentResponseOnWriteEnabled(true)
                .directMode()
                .buildClient();
            cosmosDatabase = client2.getDatabase(databaseId);
            cosmosContainer = cosmosDatabase.getContainer(containerId);

            // Reach into client2's gateway address cache via reflection and swap its
            // HTTP client for one routed through an unreachable local proxy, forcing
            // address resolution to fail; a background thread restores a working
            // client after 5s so the read below can eventually succeed.
            AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient();
            GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true);
            @SuppressWarnings("rawtypes")
            Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true);
            Object endpointCache = addressCacheByEndpoint.values().toArray()[0];
            GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true);
            HttpClient httpClient = httpClient(true);
            FieldUtils.writeField(addressCache, "httpClient", httpClient, true);
            new Thread(() -> {
                try {
                    Thread.sleep(5000);
                    HttpClient httpClient1 = httpClient(false);
                    FieldUtils.writeField(addressCache, "httpClient", httpClient1, true);
                } catch (Exception e) {
                    fail(e.getMessage());
                }
            }).start();
            PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk"));
            CosmosItemResponse<InternalObjectNode> readResourceResponse =
                cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class);
            // The read ultimately succeeds, but its diagnostics must surface the
            // "Connection refused" failures seen while the fake proxy was in place.
            assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
            assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
            assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
            assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null");
            assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" +
                ".channel.AbstractChannel$AnnotatedConnectException: Connection refused");
        } catch (Exception ex) {
            logger.error("Error in test addressResolutionStatistics", ex);
            fail("This test should not throw exception " + ex);
        } finally {
            safeDeleteSyncDatabase(cosmosDatabase);
            if (client1 != null) {
                client1.close();
            }
            if (client2 != null) {
                client2.close();
            }
        }
    }

    // Builds a test document whose id and partition key share one random UUID.
    private InternalObjectNode getInternalObjectNode() {
        InternalObjectNode internalObjectNode = new InternalObjectNode();
        String uuid = UUID.randomUUID().toString();
        internalObjectNode.setId(uuid);
        BridgeInternal.setProperty(internalObjectNode, "mypk", uuid);
        return internalObjectNode;
    }

    // Builds a test document with a random id and the supplied partition key value.
    private InternalObjectNode getInternalObjectNode(String pkValue) {
        InternalObjectNode internalObjectNode = new InternalObjectNode();
        String uuid = UUID.randomUUID().toString();
        internalObjectNode.setId(uuid);
        BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue);
        return internalObjectNode;
    }

    // Reads the private supplementalResponseStatisticsList field via reflection.
    private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
        Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
        storeResponseStatisticsField.setAccessible(true);
        @SuppressWarnings({"unchecked"})
        List<ClientSideRequestStatistics.StoreResponseStatistics> list =
            (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics);
        return list;
    }

    // Replaces the private supplementalResponseStatisticsList with an empty list.
    private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
        Field storeResponseStatisticsField =
ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); } private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); } private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMicroSec\""); } public void isValidJSON(final String json) { try { final JsonParser parser = new ObjectMapper().createParser(json); while (parser.nextToken() != null) { } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } } private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); } private IndexUtilizationInfo 
    // NOTE: the "private IndexUtilizationInfo" return type sits at the end of the
    // previous line. Parses an index-utilization JSON payload; returns null on
    // malformed input (callers assert non-null).
    createFromJSONString(String jsonString) {
        ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper();
        IndexUtilizationInfo indexUtilizationInfo = null;
        try {
            indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class);
        } catch (JsonProcessingException e) {
            logger.error("Json not correctly formed ", e);
        }
        return indexUtilizationInfo;
    }

    // Verifies the diagnostics report exactly one contacted region and that it is
    // the first available write region in the client's LocationCache (accessed
    // via reflection because the cache internals are private).
    private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception {
        RxDocumentClientImpl rxDocumentClient =
            (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient);
        GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient);
        LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager);
        Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo");
        locationInfoField.setAccessible(true);
        Object locationInfo = locationInfoField.get(locationCache);
        Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" +
            ".LocationCache$DatabaseAccountLocationsInfo");
        Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField(
            "availableWriteEndpointByLocation");
        availableWriteEndpointByLocation.setAccessible(true);
        @SuppressWarnings("unchecked")
        Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo);
        String regionName = map.keySet().iterator().next();
        assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1);
        // Diagnostics normalize region names to lower case.
        assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase());
    }

    // Minimal POJO used as the test document payload.
    public static class TestItem {
        public String id;
        public String mypk;

        public TestItem() {
        }
    }
}
// Tests for the diagnostics payloads emitted by the sync/async Cosmos clients in
// both gateway and direct (RNTBD) connection modes.
class CosmosDiagnosticsTest extends TestSuiteBase {
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
    private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT;
    // One client per connection mode; both closed in afterClass.
    private CosmosClient gatewayClient;
    private CosmosClient directClient;
    private CosmosAsyncDatabase cosmosAsyncDatabase;
    private CosmosContainer container;
    private CosmosAsyncContainer cosmosAsyncContainer;

    @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
    public void beforeClass() {
        // Guards against the TestNG lifecycle running beforeClass twice.
        assertThat(this.gatewayClient).isNull();
        gatewayClient = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .gatewayMode()
            .buildClient();
        directClient = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient());
        cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId());
        container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
    }

    @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        if (this.gatewayClient != null) {
            this.gatewayClient.close();
        }
        if (this.directClient != null) {
            this.directClient.close();
        }
    }

    // Pairs of (query text, queryMetricsEnabled flag); 'wrongId' guarantees
    // every query returns zero results.
    @DataProvider(name = "query")
    private Object[][] query() {
        return new Object[][]{
            new Object[] { "Select * from c where c.id = 'wrongId'", true },
            new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true },
            new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true },
            new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true },
            new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true },
            new Object[] { "Select * from c where c.id = 'wrongId'", false
            },
            new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false },
            new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false },
            new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false },
            new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false },
            new Object[] { "Select * from c where c.id = 'wrongId'", false },
            new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false },
            new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false },
            new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false },
            new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false },
        };
    }

    // Pairs of (item count in the logical partition, queryMetricsEnabled flag).
    @DataProvider(name = "readAllItemsOfLogicalPartition")
    private Object[][] readAllItemsOfLogicalPartition() {
        return new Object[][]{
            new Object[] { 1, true },
            new Object[] { 5, null },
            new Object[] { 20, null },
            new Object[] { 1, false },
            new Object[] { 5, false },
            new Object[] { 20, false },
        };
    }

    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void gatewayDiagnostics() throws Exception {
        CosmosClient testGatewayClient = null;
        try {
            testGatewayClient = new CosmosClientBuilder()
                .endpoint(TestConfigurations.HOST)
                .key(TestConfigurations.MASTER_KEY)
                .contentResponseOnWriteEnabled(true)
                .gatewayMode()
                .buildClient();
            CosmosContainer container =
                testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
            InternalObjectNode internalObjectNode = getInternalObjectNode();
            CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode);
            String diagnostics = createResponse.getDiagnostics().toString();
            // Gateway mode must surface gatewayStatistics plus the metadata lookups.
            assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
            assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
            assertThat(diagnostics).contains("\"operationType\":\"Create\"");
            assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
            assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
            assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
            assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
            assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
            assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull();
            validateTransportRequestTimelineGateway(diagnostics);
            validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient());
            isValidJSON(diagnostics);
        } finally {
            if (testGatewayClient != null) {
                testGatewayClient.close();
            }
        }
    }

    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void gatewayDiagnosticsOnException() throws Exception {
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        CosmosItemResponse<InternalObjectNode> createResponse = null;
        try {
            createResponse = this.container.createItem(internalObjectNode);
            CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
            // Read back with a deliberately wrong partition key to force a 404.
            ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
            CosmosItemResponse<InternalObjectNode> readResponse =
                this.container.readItem(BridgeInternal.getProperties(createResponse).getId(),
                    new PartitionKey("wrongPartitionKey"),
                    InternalObjectNode.class);
            fail("request should fail as partition key is wrong");
        } catch (CosmosException exception) {
            // Both the exception's toString and its message must be valid JSON.
            isValidJSON(exception.toString());
            isValidJSON(exception.getMessage());
            String diagnostics = exception.getDiagnostics().toString();
            assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
            assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
            assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
            assertThat(diagnostics).contains("\"statusCode\":404");
            assertThat(diagnostics).contains("\"operationType\":\"Read\"");
            assertThat(diagnostics).contains("\"userAgent\":\"" +
                Utils.getUserAgent() + "\"");
            assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
            assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
            assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull();
            validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient());
            assertThat(exception.getDiagnostics().getDuration()).isNotNull();
            validateTransportRequestTimelineGateway(diagnostics);
            isValidJSON(diagnostics);
        }
    }

    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void systemDiagnosticsForSystemStateInformation() {
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode);
        String diagnostics = createResponse.getDiagnostics().toString();
        // Diagnostics must capture host-level state at request time.
        assertThat(diagnostics).contains("systemInformation");
        assertThat(diagnostics).contains("usedMemory");
        assertThat(diagnostics).contains("availableMemory");
        assertThat(diagnostics).contains("systemCpuLoad");
        assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
        assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
        assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
    }

    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void directDiagnostics() throws Exception {
        CosmosClient testDirectClient = null;
        try {
            testDirectClient = new CosmosClientBuilder()
                .endpoint(TestConfigurations.HOST)
                .key(TestConfigurations.MASTER_KEY)
                .contentResponseOnWriteEnabled(true)
                .directMode()
                .buildClient();
            CosmosContainer cosmosContainer =
                testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
            InternalObjectNode internalObjectNode = getInternalObjectNode();
            CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
            String diagnostics =
                createResponse.getDiagnostics().toString();
            // Direct mode: store-level statistics present, gateway statistics absent.
            assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
            assertThat(diagnostics).contains("supplementalResponseStatisticsList");
            assertThat(diagnostics).contains("\"gatewayStatistics\":null");
            assertThat(diagnostics).contains("addressResolutionStatistics");
            assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
            assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\"");
            assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\"");
            assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
            assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
            assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
            assertThat(diagnostics).contains("\"backendLatencyInMs\"");
            assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty();
            assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
            validateTransportRequestTimelineDirect(diagnostics);
            validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient());
            isValidJSON(diagnostics);

            // Failure path (409 conflict) must still carry full direct-mode diagnostics.
            try {
                cosmosContainer.createItem(internalObjectNode);
                fail("expected 409");
            } catch (CosmosException e) {
                diagnostics = e.getDiagnostics().toString();
                assertThat(diagnostics).contains("\"backendLatencyInMs\"");
                validateTransportRequestTimelineDirect(e.getDiagnostics().toString());
            }
        } finally {
            if (testDirectClient != null) {
                testDirectClient.close();
            }
        }
    }

    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void queryPlanDiagnostics() throws JsonProcessingException {
        CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        List<String> itemIdList = new ArrayList<>();
        // Seed 100 documents, remembering every 20th id for the IN-clause query.
        for (int i = 0; i < 100; i++) {
            InternalObjectNode internalObjectNode = getInternalObjectNode();
            CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
            if (i % 20 == 0) {
                itemIdList.add(internalObjectNode.getId());
            }
        }
        String queryDiagnostics = null;
        List<String> queryList = new ArrayList<>();
        // Three shapes: full scan, cross-partition IN query, and an empty-result query.
        queryList.add("Select * from c");
        StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in (");
        for (int i = 0; i < itemIdList.size(); i++) {
            queryBuilder.append("'").append(itemIdList.get(i)).append("'");
            if (i < (itemIdList.size() - 1)) {
                queryBuilder.append(",");
            } else {
                queryBuilder.append(")");
            }
        }
        queryList.add(queryBuilder.toString());
        queryList.add("Select * from c where c.id = 'wrongId'");
        for (String query : queryList) {
            int feedResponseCounter = 0;
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setQueryMetricsEnabled(true);
            Iterator<FeedResponse<InternalObjectNode>> iterator =
                cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator();
            while (iterator.hasNext()) {
                FeedResponse<InternalObjectNode> feedResponse = iterator.next();
                queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
                // Only the first page triggers the query-plan gateway call; later
                // pages must not repeat the query-plan diagnostics.
                if (feedResponseCounter == 0) {
                    assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)=");
                    assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)=");
                    assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)=");
                    String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline());
                    assertThat(requestTimeLine).contains("connectionConfigured");
                    assertThat(requestTimeLine).contains("requestSent");
                    assertThat(requestTimeLine).contains("transitTime");
                    assertThat(requestTimeLine).contains("received");
                } else {
                    assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)=");
                    assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)=");
                    assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)=");
                    assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline =");
                }
                feedResponseCounter++;
            }
        }
    }

    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void queryMetricsWithIndexMetrics() {
        CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        List<String> itemIdList = new ArrayList<>();
        // Seed 100 documents, remembering every 20th id for the IN-clause query.
        for (int i = 0; i < 100; i++) {
            InternalObjectNode internalObjectNode = getInternalObjectNode();
            CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
            if (i % 20 == 0) {
                itemIdList.add(internalObjectNode.getId());
            }
        }
        String queryDiagnostics = null;
        List<String> queryList = new ArrayList<>();
        StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in (");
        for (int i = 0; i < itemIdList.size(); i++) {
            queryBuilder.append("'").append(itemIdList.get(i)).append("'");
            if (i < (itemIdList.size() - 1)) {
                queryBuilder.append(",");
            } else {
                queryBuilder.append(")");
            }
        }
        queryList.add(queryBuilder.toString());
        for (String query : queryList) {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setQueryMetricsEnabled(true);
            options.setIndexMetricsEnabled(true);
            Iterator<FeedResponse<InternalObjectNode>> iterator =
                cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator();
            while (iterator.hasNext()) {
                FeedResponse<InternalObjectNode> feedResponse = iterator.next();
                queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
                logger.info("This is query diagnostics {}", queryDiagnostics);
                // The index-utilization header is base64-encoded JSON; only present
                // on pages where the backend reported index metrics.
                if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) {
                    assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull();
assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) private void validateDirectModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } private void validateGatewayModeQueryDiagnostics(String 
    // NOTE: the "private void validateGatewayModeQueryDiagnostics(String" part of
    // this signature sits at the end of the previous line.
    diagnostics) {
        assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
        assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
        assertThat(diagnostics).contains("\"operationType\":\"Query\"");
        assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
        assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
        assertThat(diagnostics).contains("\"regionsContacted\"");
    }

    @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2)
    public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) {
        // NOTE(review): despite its name this client is built in gatewayMode, and it
        // is never closed — this looks like a client leak; confirm and add a close.
        CosmosClient testDirectClient = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .gatewayMode()
            .buildClient();
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        CosmosContainer cosmosContainer =
            testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId())
                .getContainer(cosmosAsyncContainer.getId());
        List<String> itemIdList = new ArrayList<>();
        // Seed 100 documents (ids sampled but unused here; queries match nothing).
        for (int i = 0; i < 100; i++) {
            InternalObjectNode internalObjectNode = getInternalObjectNode();
            CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
            if (i % 20 == 0) {
                itemIdList.add(internalObjectNode.getId());
            }
        }
        // Group-by queries only surface full diagnostics on the first page.
        boolean qroupByFirstResponse = true;
        if (qmEnabled != null) {
            options.setQueryMetricsEnabled(qmEnabled);
        }
        Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer
            .queryItems(query, options, InternalObjectNode.class)
            .iterableByPage()
            .iterator();
        assertThat(iterator.hasNext()).isTrue();
        while (iterator.hasNext()) {
            FeedResponse<InternalObjectNode> feedResponse = iterator.next();
            String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
            assertThat(feedResponse.getResults().size()).isEqualTo(0);
            if (!query.contains("group by") || qroupByFirstResponse) {
                validateQueryDiagnostics(queryDiagnostics, qmEnabled, true);
validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration 
(ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = 
client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); createResponse = container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient()); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = 
client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(),null); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); 
ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void 
serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.container.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2"; testItem.mypk = "TestPk"; itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } @Test(groups = {"simple"}, timeOut = TIMEOUT) 
public void rntbdRequestResponseLengthStatistics() throws Exception { TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem); validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse)); try { container.createItem(testItem); fail("expected to fail due to 409"); } catch (CosmosException e) { validate(e.getDiagnostics(), testItemLength, 0); } CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdStatistics() throws Exception { Instant beforeClientInitialization = Instant.now(); CosmosClient client1 = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .directMode() .buildClient(); TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); Thread.sleep(1000); Instant beforeInitializingRntbdServiceEndpoint = Instant.now(); CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem); Instant 
afterInitializingRntbdServiceEndpoint = Instant.now(); Thread.sleep(1000); Instant beforeOperation2 = Instant.now(); CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem); Instant afterOperation2 = Instant.now(); Thread.sleep(1000); Instant beforeOperation3 = Instant.now(); CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem); Instant afterOperation3 = Instant.now(); validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3); CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } finally { LifeCycleUtils.closeQuietly(client1); } } private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics, Instant clientInitializationTime, Instant beforeInitializingRntbdServiceEndpoint, Instant afterInitializingRntbdServiceEndpoint, Instant beforeOperation2, Instant afterOperation2, Instant beforeOperation3, Instant afterOperation3) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0); 
assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0); JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = afterOperation2.plusMillis(2); Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = 
responseStatisticsList.get(0).get("storeResult"); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); 
assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); 
assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } } private InternalObjectNode getInternalObjectNode() { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", uuid); return internalObjectNode; } private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return internalObjectNode; } private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics); return list; } private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = 
ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); } private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); } private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMicroSec\""); } public void isValidJSON(final String json) { try { final JsonParser parser = new ObjectMapper().createParser(json); while (parser.nextToken() != null) { } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } } private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); } private IndexUtilizationInfo 
createFromJSONString(String jsonString) { ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper(); IndexUtilizationInfo indexUtilizationInfo = null; try { indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class); } catch (JsonProcessingException e) { logger.error("Json not correctly formed ", e); } return indexUtilizationInfo; } private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception { RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager); Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo"); locationInfoField.setAccessible(true); Object locationInfo = locationInfoField.get(locationCache); Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" + ".LocationCache$DatabaseAccountLocationsInfo"); Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField( "availableWriteEndpointByLocation"); availableWriteEndpointByLocation.setAccessible(true); @SuppressWarnings("unchecked") Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo); String regionName = map.keySet().iterator().next(); assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1); assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase()); } public static class TestItem { public String id; public String mypk; public TestItem() { } } }
I wonder whether we should check the unique values of the pkRangeId instead, because retries can sometimes occur and the raw occurrence count could make the test fail.
/**
 * Validates that the diagnostics of a cross-partition ORDER BY query report the same
 * partition key ranges under both diagnostics keys ("partitionKeyRangeId" entries and
 * "pkrId" entries).
 *
 * NOTE(review fix): raw occurrence counts are unreliable here — a transient retry can
 * repeat a partition key range id in the diagnostics string, which made the previous
 * {@code counterPkRid * 2 == counterPartitionKeyRangeId} assertion flaky. We therefore
 * compare the sets of unique ids instead of the number of regex matches.
 */
public void queryDiagnosticsOnOrderBy() {
    // Dedicated high-throughput container so the ORDER BY query fans out over
    // multiple physical partitions.
    String containerId = "testcontainer";
    cosmosAsyncDatabase.createContainer(containerId, "/mypk",
        ThroughputProperties.createManualThroughput(40000)).block();
    CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    testcontainer.createItem(getInternalObjectNode()).block();
    options.setMaxDegreeOfParallelism(-1);
    String query = "SELECT * from c ORDER BY c._ts DESC";
    CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux =
        testcontainer.queryItems(query, options, InternalObjectNode.class);

    // Unique ids observed under each diagnostics key. Fully qualified on purpose so
    // this block does not depend on java.util.Set/HashSet being imported at the top
    // of the file.
    java.util.Set<String> partitionKeyRangeIds = new java.util.HashSet<>();
    java.util.Set<String> pkRids = new java.util.HashSet<>();

    cosmosPagedFlux.byPage().flatMap(feedResponse -> {
        String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString();
        // Capture the digit following each key rather than merely counting matches,
        // so duplicates introduced by retries collapse in the set.
        Matcher matcher = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)")
            .matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            partitionKeyRangeIds.add(matcher.group(2));
        }
        matcher = Pattern.compile("(pkrId:)(\\d)").matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            pkRids.add(matcher.group(2));
        }
        return Flux.just(feedResponse);
    }).blockLast();

    // Both views of the diagnostics must agree on exactly which ranges were contacted.
    assertThat(pkRids).isNotEmpty();
    assertThat(pkRids).isEqualTo(partitionKeyRangeIds);
    deleteCollection(testcontainer);
}
Pattern pattern = Pattern.compile("\"partitionKeyRangeId\":\"");
/**
 * Cross-partition ORDER BY query: verifies that the query diagnostics expose a
 * consistent view of the contacted partition key ranges — the set of ids found next
 * to "partitionKeyRangeId" must equal the set found next to "pkrId". Unique sets are
 * compared (rather than match counts) so that retries, which may repeat a range id
 * in the diagnostics, cannot break the assertion.
 */
public void queryDiagnosticsOnOrderBy() {
    String containerId = "testcontainer";
    // Provision enough manual throughput to force a multi-partition container.
    cosmosAsyncDatabase
        .createContainer(containerId, "/mypk", ThroughputProperties.createManualThroughput(40000))
        .block();
    CosmosAsyncContainer orderByContainer = cosmosAsyncDatabase.getContainer(containerId);

    CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions();
    queryOptions.setConsistencyLevel(ConsistencyLevel.EVENTUAL);
    orderByContainer.createItem(getInternalObjectNode()).block();
    // Negative parallelism lets the SDK pick the fan-out degree itself.
    queryOptions.setMaxDegreeOfParallelism(-1);

    String orderByQuery = "SELECT * from c ORDER BY c._ts DESC";
    CosmosPagedFlux<InternalObjectNode> pagedFlux =
        orderByContainer.queryItems(orderByQuery, queryOptions, InternalObjectNode.class);

    // Hoist the (pure) pattern compilation out of the per-page lambda.
    Pattern partitionKeyRangeIdPattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)");
    Pattern pkrIdPattern = Pattern.compile("(pkrId:)(\\d)");
    Set<String> idsFromPartitionKeyRangeId = new HashSet<>();
    Set<String> idsFromPkrId = new HashSet<>();

    pagedFlux.byPage().flatMap(page -> {
        String diagnostics = page.getCosmosDiagnostics().toString();
        Matcher idMatcher = partitionKeyRangeIdPattern.matcher(diagnostics);
        while (idMatcher.find()) {
            idsFromPartitionKeyRangeId.add(idMatcher.group(2));
        }
        idMatcher = pkrIdPattern.matcher(diagnostics);
        while (idMatcher.find()) {
            idsFromPkrId.add(idMatcher.group(2));
        }
        return Flux.just(page);
    }).blockLast();

    assertThat(idsFromPkrId).isNotEmpty();
    assertThat(idsFromPkrId).isEqualTo(idsFromPartitionKeyRangeId);
    deleteCollection(orderByContainer);
}
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosAsyncDatabase cosmosAsyncDatabase; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false 
}, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = "readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnostics() throws Exception { CosmosClient testGatewayClient = null; try { testGatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); 
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient()); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + 
Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient()); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = 
createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient()); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() throws JsonProcessingException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); 
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan 
Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); 
assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) private void validateDirectModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } private void validateGatewayModeQueryDiagnostics(String 
diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); 
validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration 
(ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = 
client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); createResponse = container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient()); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = 
client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(),null); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); 
ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void 
serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.container.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2"; testItem.mypk = "TestPk"; itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } @Test(groups = {"simple"}, timeOut = TIMEOUT) 
// Verifies that RNTBD (direct-mode TCP) diagnostics report request/response payload lengths
// for create (201), conflict (409), read and delete operations.
// NOTE: the @Test annotation for this method precedes it in the file.
public void rntbdRequestResponseLengthStatistics() throws Exception {
    TestItem testItem = new TestItem();
    testItem.id = UUID.randomUUID().toString();
    testItem.mypk = UUID.randomUUID().toString();
    // Serialized size of the item is the lower bound for the request payload length.
    int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
    CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());

    // Successful create: request carries the item, response carries the created document.
    CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem);
    validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse));

    // Conflicting create: request still carries the item, but the 409 response has no payload.
    try {
        container.createItem(testItem);
        fail("expected to fail due to 409");
    } catch (CosmosException e) {
        validate(e.getDiagnostics(), testItemLength, 0);
    }

    // Point read: no request payload, response carries the document.
    CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
    validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));

    // Delete: neither request nor response carries a payload.
    CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
    validate(deleteItemResponse.getDiagnostics(), 0, 0);
}

// Verifies RNTBD service-endpoint statistics (channel counts, created/lastRequest timestamps)
// against wall-clock instants captured around three spaced-out upsert operations.
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdStatistics() throws Exception {
    Instant beforeClientInitialization = Instant.now();
    CosmosClient client1 = null;
    try {
        client1 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .directMode()
            .buildClient();

        TestItem testItem = new TestItem();
        testItem.id = UUID.randomUUID().toString();
        testItem.mypk = UUID.randomUUID().toString();
        int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
        CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());

        // The sleeps separate the operations so the endpoint's created/lastRequest
        // timestamps can be attributed to a specific operation window below.
        Thread.sleep(1000);

        // Operation 1 triggers lazy initialization of the RNTBD service endpoint.
        Instant beforeInitializingRntbdServiceEndpoint = Instant.now();
        container.upsertItem(testItem);
        Instant afterInitializingRntbdServiceEndpoint = Instant.now();

        Thread.sleep(1000);
        Instant beforeOperation2 = Instant.now();
        container.upsertItem(testItem);
        Instant afterOperation2 = Instant.now();

        Thread.sleep(1000);
        Instant beforeOperation3 = Instant.now();
        CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem);
        Instant afterOperation3 = Instant.now();

        validateRntbdStatistics(operation3Response.getDiagnostics(),
            beforeClientInitialization,
            beforeInitializingRntbdServiceEndpoint,
            afterInitializingRntbdServiceEndpoint,
            beforeOperation2,
            afterOperation2,
            beforeOperation3,
            afterOperation3);

        // Payload-length checks for read and delete, as in rntbdRequestResponseLengthStatistics.
        CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
        validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));

        CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
        validate(deleteItemResponse.getDiagnostics(), 0, 0);
    } finally {
        LifeCycleUtils.closeQuietly(client1);
    }
}

/**
 * Validates the serviceEndpointStatistics section of the diagnostics JSON produced by the
 * third operation: channel/queue counters, and that createdTime / lastRequestTime /
 * lastSuccessfulRequestTime fall inside the expected operation windows (with small
 * clock-skew thresholds).
 * NOTE(review): clientInitializationTime, beforeOperation3 and afterOperation3 are currently
 * unused; they are kept to preserve the call signature.
 */
private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics,
                                     Instant clientInitializationTime,
                                     Instant beforeInitializingRntbdServiceEndpoint,
                                     Instant afterInitializingRntbdServiceEndpoint,
                                     Instant beforeOperation2,
                                     Instant afterOperation2,
                                     Instant beforeOperation3,
                                     Instant afterOperation3) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    assertThat(storeResult).isNotNull();

    assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0);
    assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0);

    JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics");
    assertThat(serviceEndpointStatistics).isNotNull();
    assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0);
    // No other concurrent operations are in flight, so no channel should be held.
    assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0);
    // Exactly the request that produced these diagnostics is in flight.
    assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1);
    assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false);

    // The endpoint was created during operation 1 — allow 1-2ms of clock granularity slack.
    Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isAfterOrEqualTo(beforeInitializationThreshold);
    Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isBeforeOrEqualTo(afterInitializationThreshold);

    // The last (successful) request recorded before this snapshot is operation 2.
    Instant afterOperation2Threshold = afterOperation2.plusMillis(2);
    Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2);
    assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText()))
        .isAfterOrEqualTo(beforeOperation2Threshold)
        .isBeforeOrEqualTo(afterOperation2Threshold);
    assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText()))
        .isAfterOrEqualTo(beforeOperation2Threshold)
        .isBeforeOrEqualTo(afterOperation2Threshold);
}

/**
 * Validates the request/response payload-length fields of the first storeResult in the
 * diagnostics JSON. The RNTBD wire lengths must strictly exceed the raw payload sizes
 * (protocol framing overhead); responsePayloadLengthInBytes is only checked when the
 * response carried no exception.
 */
private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    // Assert non-null BEFORE dereferencing storeResult (the original checked it after
    // already calling storeResult.get("exception"), which would NPE instead of failing cleanly).
    assertThat(storeResult).isNotNull();
    boolean hasPayload = storeResult.get("exception").isNull();
    assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize);
    assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize);
    if (hasPayload) {
        assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize);
    }
    assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize);
}

// Verifies addressResolutionStatistics in direct-mode diagnostics: a healthy write shows a
// completed, error-free resolution; then the address-cache HTTP client of a second client is
// swapped (via reflection) for one pointing at a dead proxy so the subsequent read records a
// "Connection refused" errorMessage. A background thread restores a working client after 5s
// so the read can eventually succeed.
@Test(groups = {"emulator"}, timeOut = TIMEOUT)
public void addressResolutionStatistics() {
    CosmosClient client1 = null;
    CosmosClient client2 = null;
    String databaseId = DatabaseForTest.generateId();
    String containerId = UUID.randomUUID().toString();
    CosmosDatabase cosmosDatabase = null;
    CosmosContainer cosmosContainer = null;
    try {
        client1 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        client1.createDatabase(databaseId);
        cosmosDatabase = client1.getDatabase(databaseId);
        cosmosDatabase.createContainer(containerId, "/mypk");
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        cosmosContainer = cosmosDatabase.getContainer(containerId);

        // Healthy path: address resolution completes with no error.
        CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode);
        assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
        assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
        assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
        assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null");
        assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" +
            ".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information");

        client2 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        cosmosDatabase = client2.getDatabase(databaseId);
        cosmosContainer = cosmosDatabase.getContainer(containerId);

        // Reach into client2's gateway address cache via reflection and replace its HTTP
        // client with one configured against a non-existent local proxy (port 8888).
        AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient();
        GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true);
        @SuppressWarnings("rawtypes")
        Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true);
        Object endpointCache = addressCacheByEndpoint.values().toArray()[0];
        GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true);
        HttpClient httpClient = httpClient(true);
        FieldUtils.writeField(addressCache, "httpClient", httpClient, true);

        // Restore a working HTTP client after 5 seconds so the retried resolution succeeds.
        new Thread(() -> {
            try {
                Thread.sleep(5000);
                HttpClient httpClient1 = httpClient(false);
                FieldUtils.writeField(addressCache, "httpClient", httpClient1, true);
            } catch (Exception e) {
                fail(e.getMessage());
            }
        }).start();

        PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk"));
        CosmosItemResponse<InternalObjectNode> readResourceResponse =
            cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class);
        // The diagnostics must record both the failed attempt (Connection refused) and the
        // eventually successful one (errorMessage null).
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
        assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null");
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" +
            ".channel.AbstractChannel$AnnotatedConnectException: Connection refused");
    } catch (Exception ex) {
        logger.error("Error in test addressResolutionStatistics", ex);
        fail("This test should not throw exception " + ex);
    } finally {
        safeDeleteSyncDatabase(cosmosDatabase);
        if (client1 != null) {
            client1.close();
        }
        if (client2 != null) {
            client2.close();
        }
    }
}

// Builds a test document whose partition-key property "mypk" equals its id (a fresh UUID).
private InternalObjectNode getInternalObjectNode() {
    InternalObjectNode internalObjectNode = new InternalObjectNode();
    String uuid = UUID.randomUUID().toString();
    internalObjectNode.setId(uuid);
    BridgeInternal.setProperty(internalObjectNode, "mypk", uuid);
    return internalObjectNode;
}

// Builds a test document with a fresh UUID id and the supplied partition-key value.
private InternalObjectNode getInternalObjectNode(String pkValue) {
    InternalObjectNode internalObjectNode = new InternalObjectNode();
    String uuid = UUID.randomUUID().toString();
    internalObjectNode.setId(uuid);
    BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue);
    return internalObjectNode;
}

// Reads the private supplementalResponseStatisticsList field via reflection.
private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    storeResponseStatisticsField.setAccessible(true);
    @SuppressWarnings({"unchecked"})
    List<ClientSideRequestStatistics.StoreResponseStatistics> list =
        (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics);
    return list;
}

// Replaces the private supplementalResponseStatisticsList with an empty list via reflection.
private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    storeResponseStatisticsField.setAccessible(true);
    storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>());
}

// Asserts the gateway-mode request timeline contains every expected event name.
// (The original asserted "connectionConfigured" twice; the duplicate was removed.)
private void validateTransportRequestTimelineGateway(String diagnostics) {
    assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\"");
    assertThat(diagnostics).contains("\"eventName\":\"requestSent\"");
    assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
    assertThat(diagnostics).contains("\"eventName\":\"received\"");
}

// Asserts the direct-mode (RNTBD) request timeline contains every expected event and field.
private void validateTransportRequestTimelineDirect(String diagnostics) {
    assertThat(diagnostics).contains("\"eventName\":\"created\"");
    assertThat(diagnostics).contains("\"eventName\":\"queued\"");
    assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\"");
    assertThat(diagnostics).contains("\"eventName\":\"pipelined\"");
    assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
    assertThat(diagnostics).contains("\"eventName\":\"received\"");
    assertThat(diagnostics).contains("\"eventName\":\"completed\"");
    assertThat(diagnostics).contains("\"startTimeUTC\"");
    assertThat(diagnostics).contains("\"durationInMicroSec\"");
}

// Fails the test if the given string is not well-formed JSON. Uses try-with-resources so
// the JsonParser is always closed (the original leaked it).
public void isValidJSON(final String json) {
    try (JsonParser parser = new ObjectMapper().createParser(json)) {
        while (parser.nextToken() != null) {
        }
    } catch (IOException ex) {
        fail("Diagnostic string is not in json format ", ex);
    }
}

// Builds an HttpClient; when fakeProxy is true it is routed through a non-listening local
// proxy (localhost:8888) so every address-resolution call fails with "Connection refused".
private HttpClient httpClient(boolean fakeProxy) {
    HttpClientConfig httpClientConfig;
    if (fakeProxy) {
        httpClientConfig = new HttpClientConfig(new Configs())
            .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888)));
    } else {
        httpClientConfig = new HttpClientConfig(new Configs());
    }
    return HttpClient.createFixed(httpClientConfig);
}

// Deserializes an IndexUtilizationInfo from JSON; returns null (after logging) on bad input.
private IndexUtilizationInfo createFromJSONString(String jsonString) {
    ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper();
    IndexUtilizationInfo indexUtilizationInfo = null;
    try {
        indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class);
    } catch (JsonProcessingException e) {
        logger.error("Json not correctly formed ", e);
    }
    return indexUtilizationInfo;
}

/**
 * Asserts the diagnostics report exactly one contacted region and that it matches (in
 * lower case) a write region from the client's LocationCache, which is reached through
 * reflection on the private locationInfo / availableWriteEndpointByLocation fields.
 */
private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception {
    RxDocumentClientImpl rxDocumentClient =
        (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient);
    GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient);
    LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager);
    Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo");
    locationInfoField.setAccessible(true);
    Object locationInfo = locationInfoField.get(locationCache);
    // DatabaseAccountLocationsInfo is a private nested class, so it is loaded by name.
    Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" +
        ".LocationCache$DatabaseAccountLocationsInfo");
    Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField(
        "availableWriteEndpointByLocation");
    availableWriteEndpointByLocation.setAccessible(true);
    @SuppressWarnings("unchecked")
    Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo);
    String regionName = map.keySet().iterator().next();
    assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1);
    assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase());
}

// Minimal serializable test document: "mypk" is the container's partition-key property.
public static class TestItem {
    public String id;
    public String mypk;

    public TestItem() {
    }
}
}
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosAsyncDatabase cosmosAsyncDatabase; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false 
}, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = "readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnostics() throws Exception { CosmosClient testGatewayClient = null; try { testGatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); 
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient()); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + 
Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient()); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = 
createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient()); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() throws JsonProcessingException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); 
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan 
Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); 
assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) private void validateDirectModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } private void validateGatewayModeQueryDiagnostics(String 
diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); 
validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration 
(ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = 
client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); createResponse = container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient()); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = 
client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(),null); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); 
ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void 
serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.container.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2"; testItem.mypk = "TestPk"; itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } @Test(groups = {"simple"}, timeOut = TIMEOUT) 
public void rntbdRequestResponseLengthStatistics() throws Exception { TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem); validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse)); try { container.createItem(testItem); fail("expected to fail due to 409"); } catch (CosmosException e) { validate(e.getDiagnostics(), testItemLength, 0); } CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdStatistics() throws Exception { Instant beforeClientInitialization = Instant.now(); CosmosClient client1 = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .directMode() .buildClient(); TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); Thread.sleep(1000); Instant beforeInitializingRntbdServiceEndpoint = Instant.now(); CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem); Instant 
afterInitializingRntbdServiceEndpoint = Instant.now(); Thread.sleep(1000); Instant beforeOperation2 = Instant.now(); CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem); Instant afterOperation2 = Instant.now(); Thread.sleep(1000); Instant beforeOperation3 = Instant.now(); CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem); Instant afterOperation3 = Instant.now(); validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3); CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } finally { LifeCycleUtils.closeQuietly(client1); } } private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics, Instant clientInitializationTime, Instant beforeInitializingRntbdServiceEndpoint, Instant afterInitializingRntbdServiceEndpoint, Instant beforeOperation2, Instant afterOperation2, Instant beforeOperation3, Instant afterOperation3) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0); 
assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0); JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = afterOperation2.plusMillis(2); Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = 
responseStatisticsList.get(0).get("storeResult"); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); 
assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); 
assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } } private InternalObjectNode getInternalObjectNode() { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", uuid); return internalObjectNode; } private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return internalObjectNode; } private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics); return list; } private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = 
ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); } private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); } private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMicroSec\""); } public void isValidJSON(final String json) { try { final JsonParser parser = new ObjectMapper().createParser(json); while (parser.nextToken() != null) { } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } } private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); } private IndexUtilizationInfo 
createFromJSONString(String jsonString) { ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper(); IndexUtilizationInfo indexUtilizationInfo = null; try { indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class); } catch (JsonProcessingException e) { logger.error("Json not correctly formed ", e); } return indexUtilizationInfo; } private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception { RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager); Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo"); locationInfoField.setAccessible(true); Object locationInfo = locationInfoField.get(locationCache); Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" + ".LocationCache$DatabaseAccountLocationsInfo"); Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField( "availableWriteEndpointByLocation"); availableWriteEndpointByLocation.setAccessible(true); @SuppressWarnings("unchecked") Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo); String regionName = map.keySet().iterator().next(); assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1); assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase()); } public static class TestItem { public String id; public String mypk; public TestItem() { } } }
Great point — I will fix the test by collecting the distinct partition key range ids from both diagnostics markers and comparing the sets, instead of comparing raw match counts.
public void queryDiagnosticsOnOrderBy() {
    // A cross-partition ORDER BY query must contact every physical partition, so the
    // diagnostics should report each partition key range id both in the query diagnostics
    // ("partitionKeyRangeId":"<id>") and in the store-result context ("pkrId:<id>").
    // Compare the *distinct id sets* from the two markers: comparing raw occurrence
    // counts (the previous approach, asserting count("pkrId:") * 2 ==
    // count("partitionKeyRangeId")) is flaky because the number of diagnostics entries
    // emitted per partition can vary with retries and pipelining.
    String containerId = "testcontainer";
    // Manual throughput of 40000 RU/s forces the container to span multiple physical
    // partitions, making the ORDER BY query fan out.
    cosmosAsyncDatabase.createContainer(containerId, "/mypk",
        ThroughputProperties.createManualThroughput(40000)).block();
    CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    testcontainer.createItem(getInternalObjectNode()).block();
    // -1 lets the SDK pick the degree of parallelism (fan out to all partitions).
    options.setMaxDegreeOfParallelism(-1);
    String query = "SELECT * from c ORDER BY c._ts DESC";
    CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux =
        testcontainer.queryItems(query, options, InternalObjectNode.class);
    Set<String> partitionKeyRangeIds = new HashSet<>();
    Set<String> pkRids = new HashSet<>();
    cosmosPagedFlux.byPage().flatMap(feedResponse -> {
        String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString();
        // Capture the id that follows each marker rather than just counting matches.
        Matcher matcher = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d+)")
            .matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            partitionKeyRangeIds.add(matcher.group(2));
        }
        matcher = Pattern.compile("(pkrId:)(\\d+)").matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            pkRids.add(matcher.group(2));
        }
        return Flux.just(feedResponse);
    }).blockLast();
    // Both markers must be present and must agree on the set of partitions contacted.
    assertThat(pkRids).isNotEmpty();
    assertThat(pkRids).isEqualTo(partitionKeyRangeIds);
    deleteCollection(testcontainer);
}
// Marker preceding each partition key range id in the serialized query diagnostics;
// used to detect which physical partitions the query fanned out to.
Pattern pattern = Pattern.compile("\"partitionKeyRangeId\":\"");
public void queryDiagnosticsOnOrderBy() {
    // Verifies that a cross-partition ORDER BY query reports the same set of
    // partition key range ids in the query diagnostics ("partitionKeyRangeId")
    // as in the store-result context ("pkrId"). Distinct-id sets are used rather
    // than occurrence counts so the assertion is stable across retries.
    String containerId = "testcontainer";
    // Manual throughput of 40000 RU/s forces the container to span multiple
    // physical partitions, making the ORDER BY query fan out.
    cosmosAsyncDatabase.createContainer(containerId, "/mypk", ThroughputProperties.createManualThroughput(40000)).block();
    CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    options.setConsistencyLevel(ConsistencyLevel.EVENTUAL);
    testcontainer.createItem(getInternalObjectNode()).block();
    // -1 lets the SDK choose the degree of parallelism (fan out to all partitions).
    options.setMaxDegreeOfParallelism(-1);
    String query = "SELECT * from c ORDER BY c._ts DESC";
    CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = testcontainer.queryItems(query, options, InternalObjectNode.class);
    Set<String> partitionKeyRangeIds = new HashSet<>();
    Set<String> pkRids = new HashSet<>();
    cosmosPagedFlux.byPage().flatMap(feedResponse -> {
        String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString();
        // Capture the digit following each "partitionKeyRangeId":" marker.
        // NOTE(review): (\d) captures a single digit — assumes single-digit
        // pkrange ids, which holds for a freshly created container; confirm.
        Pattern pattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)");
        Matcher matcher = pattern.matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            String group = matcher.group(2);
            partitionKeyRangeIds.add(group);
        }
        // Capture the digit following each pkrId: marker.
        pattern = Pattern.compile("(pkrId:)(\\d)");
        matcher = pattern.matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            String group = matcher.group(2);
            pkRids.add(group);
        }
        return Flux.just(feedResponse);
    }).blockLast();
    // Both markers must appear and must agree on the partitions contacted.
    assertThat(pkRids).isNotEmpty();
    assertThat(pkRids).isEqualTo(partitionKeyRangeIds);
    deleteCollection(testcontainer);
}
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosAsyncDatabase cosmosAsyncDatabase; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false 
}, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = "readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnostics() throws Exception { CosmosClient testGatewayClient = null; try { testGatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); 
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient()); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + 
Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient()); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = 
createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient()); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() throws JsonProcessingException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); 
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan 
Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); 
assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) private void validateDirectModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } private void validateGatewayModeQueryDiagnostics(String 
diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); 
validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration 
(ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = 
client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); createResponse = container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient()); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = 
client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(),null); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); 
ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void 
serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.container.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2"; testItem.mypk = "TestPk"; itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } @Test(groups = {"simple"}, timeOut = TIMEOUT) 
public void rntbdRequestResponseLengthStatistics() throws Exception { TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem); validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse)); try { container.createItem(testItem); fail("expected to fail due to 409"); } catch (CosmosException e) { validate(e.getDiagnostics(), testItemLength, 0); } CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdStatistics() throws Exception { Instant beforeClientInitialization = Instant.now(); CosmosClient client1 = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .directMode() .buildClient(); TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); Thread.sleep(1000); Instant beforeInitializingRntbdServiceEndpoint = Instant.now(); CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem); Instant 
afterInitializingRntbdServiceEndpoint = Instant.now(); Thread.sleep(1000); Instant beforeOperation2 = Instant.now(); CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem); Instant afterOperation2 = Instant.now(); Thread.sleep(1000); Instant beforeOperation3 = Instant.now(); CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem); Instant afterOperation3 = Instant.now(); validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3); CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } finally { LifeCycleUtils.closeQuietly(client1); } } private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics, Instant clientInitializationTime, Instant beforeInitializingRntbdServiceEndpoint, Instant afterInitializingRntbdServiceEndpoint, Instant beforeOperation2, Instant afterOperation2, Instant beforeOperation3, Instant afterOperation3) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0); 
assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0); JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = afterOperation2.plusMillis(2); Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = 
responseStatisticsList.get(0).get("storeResult"); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); 
assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); 
assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } } private InternalObjectNode getInternalObjectNode() { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", uuid); return internalObjectNode; } private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return internalObjectNode; } private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics); return list; } private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = 
ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); } private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); } private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMicroSec\""); } public void isValidJSON(final String json) { try { final JsonParser parser = new ObjectMapper().createParser(json); while (parser.nextToken() != null) { } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } } private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); } private IndexUtilizationInfo 
createFromJSONString(String jsonString) { ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper(); IndexUtilizationInfo indexUtilizationInfo = null; try { indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class); } catch (JsonProcessingException e) { logger.error("Json not correctly formed ", e); } return indexUtilizationInfo; } private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception { RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager); Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo"); locationInfoField.setAccessible(true); Object locationInfo = locationInfoField.get(locationCache); Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" + ".LocationCache$DatabaseAccountLocationsInfo"); Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField( "availableWriteEndpointByLocation"); availableWriteEndpointByLocation.setAccessible(true); @SuppressWarnings("unchecked") Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo); String regionName = map.keySet().iterator().next(); assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1); assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase()); } public static class TestItem { public String id; public String mypk; public TestItem() { } } }
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosAsyncDatabase cosmosAsyncDatabase; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false 
}, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = "readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnostics() throws Exception { CosmosClient testGatewayClient = null; try { testGatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); 
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient()); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + 
Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient()); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = 
createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient()); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() throws JsonProcessingException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); 
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan 
Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); 
assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull();
                }
            }
        }
    }

    /**
     * Runs the query against the direct-mode container and validates that every
     * page's diagnostics honor the query-metrics toggle. Each query targets
     * 'wrongId', so every page is expected to be empty.
     */
    @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT)
    public void queryMetrics(String query, Boolean qmEnabled) {
        CosmosContainer directContainer =
            this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId())
                .getContainer(this.cosmosAsyncContainer.getId());
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        if (qmEnabled != null) {
            options.setQueryMetricsEnabled(qmEnabled);
        }
        // "group by" queries only carry full diagnostics on the first page, so we
        // stop validating after it. (Renamed from the typo "qroupByFirstResponse".)
        boolean groupByFirstResponse = true;
        Iterator<FeedResponse<InternalObjectNode>> iterator =
            directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator();
        assertThat(iterator.hasNext()).isTrue();
        while (iterator.hasNext()) {
            FeedResponse<InternalObjectNode> feedResponse = iterator.next();
            String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
            assertThat(feedResponse.getResults().size()).isEqualTo(0);
            if (!query.contains("group by") || groupByFirstResponse) {
                validateQueryDiagnostics(queryDiagnostics, qmEnabled, true);
                validateDirectModeQueryDiagnostics(queryDiagnostics);
                if (query.contains("group by")) {
                    groupByFirstResponse = false;
                }
            }
        }
    }

    // Asserts the diagnostics string carries the direct-mode markers.
    // NOTE(review): this private helper previously carried a misplaced
    // @Test(groups = {"simple"}, timeOut = TIMEOUT) annotation; TestNG never runs
    // private methods, so the annotation was dead and has been removed.
    private void validateDirectModeQueryDiagnostics(String diagnostics) {
        assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
        assertThat(diagnostics).contains("supplementalResponseStatisticsList");
        assertThat(diagnostics).contains("responseStatisticsList");
        assertThat(diagnostics).contains("\"gatewayStatistics\":null");
        assertThat(diagnostics).contains("addressResolutionStatistics");
        assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
        assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
    }

    private void validateGatewayModeQueryDiagnostics(String
diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); 
validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration 
(ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = 
client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); createResponse = container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient()); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = 
client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(),null); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); 
ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void 
serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.container.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2"; testItem.mypk = "TestPk"; itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } @Test(groups = {"simple"}, timeOut = TIMEOUT) 
public void rntbdRequestResponseLengthStatistics() throws Exception { TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem); validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse)); try { container.createItem(testItem); fail("expected to fail due to 409"); } catch (CosmosException e) { validate(e.getDiagnostics(), testItemLength, 0); } CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdStatistics() throws Exception { Instant beforeClientInitialization = Instant.now(); CosmosClient client1 = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .directMode() .buildClient(); TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); Thread.sleep(1000); Instant beforeInitializingRntbdServiceEndpoint = Instant.now(); CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem); Instant 
afterInitializingRntbdServiceEndpoint = Instant.now(); Thread.sleep(1000); Instant beforeOperation2 = Instant.now(); CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem); Instant afterOperation2 = Instant.now(); Thread.sleep(1000); Instant beforeOperation3 = Instant.now(); CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem); Instant afterOperation3 = Instant.now(); validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3); CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } finally { LifeCycleUtils.closeQuietly(client1); } } private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics, Instant clientInitializationTime, Instant beforeInitializingRntbdServiceEndpoint, Instant afterInitializingRntbdServiceEndpoint, Instant beforeOperation2, Instant afterOperation2, Instant beforeOperation3, Instant afterOperation3) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0); 
assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0); JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = afterOperation2.plusMillis(2); Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = 
responseStatisticsList.get(0).get("storeResult"); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); 
assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); 
assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } } private InternalObjectNode getInternalObjectNode() { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", uuid); return internalObjectNode; } private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return internalObjectNode; } private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics); return list; } private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = 
ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); } private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); } private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMicroSec\""); } public void isValidJSON(final String json) { try { final JsonParser parser = new ObjectMapper().createParser(json); while (parser.nextToken() != null) { } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } } private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); } private IndexUtilizationInfo 
createFromJSONString(String jsonString) { ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper(); IndexUtilizationInfo indexUtilizationInfo = null; try { indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class); } catch (JsonProcessingException e) { logger.error("Json not correctly formed ", e); } return indexUtilizationInfo; } private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception { RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager); Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo"); locationInfoField.setAccessible(true); Object locationInfo = locationInfoField.get(locationCache); Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" + ".LocationCache$DatabaseAccountLocationsInfo"); Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField( "availableWriteEndpointByLocation"); availableWriteEndpointByLocation.setAccessible(true); @SuppressWarnings("unchecked") Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo); String regionName = map.keySet().iterator().next(); assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1); assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase()); } public static class TestItem { public String id; public String mypk; public TestItem() { } } }
For reviewers: I'm not sure whether this approach of calculating the size of a described type is correct. Message serialization itself works well. If anything is wrong, please let me know and I will change it.
/**
 * Best-effort estimate of the wire size, in bytes, of an AMQP data value.
 * <p>
 * Strings and {@link Symbol}s are budgeted as UTF-16 (two bytes per char); fixed-width
 * primitives use their natural widths; composites (maps, iterables, arrays) are charged
 * a small fixed overhead plus the recursive size of their elements.
 *
 * @param obj the value to size; {@code null} contributes nothing.
 * @return the estimated encoded size in bytes.
 * @throws IllegalArgumentException if the runtime type is not a supported AMQP encoding type.
 */
private static int sizeof(Object obj) {
    if (obj == null) {
        return 0;
    }
    if (obj instanceof String) {
        // Strings are budgeted as UTF-16: 2 bytes per char.
        return obj.toString().length() << 1;
    }
    if (obj instanceof Symbol) {
        return ((Symbol) obj).length() << 1;
    }
    if (obj instanceof Byte || obj instanceof UnsignedByte) {
        return Byte.BYTES;
    }
    if (obj instanceof Integer || obj instanceof UnsignedInteger) {
        return Integer.BYTES;
    }
    if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) {
        // AMQP timestamps are 64-bit milliseconds, same width as a long.
        return Long.BYTES;
    }
    if (obj instanceof Short || obj instanceof UnsignedShort) {
        return Short.BYTES;
    }
    if (obj instanceof Boolean) {
        return 1;
    }
    if (obj instanceof Character) {
        // AMQP 'char' is a UTF-32 code point: 4 bytes.
        return 4;
    }
    if (obj instanceof Float) {
        return Float.BYTES;
    }
    if (obj instanceof Double) {
        return Double.BYTES;
    }
    if (obj instanceof UUID) {
        return 16;
    }
    if (obj instanceof Decimal32) {
        return 4;
    }
    if (obj instanceof Decimal64) {
        return 8;
    }
    if (obj instanceof Decimal128) {
        return 16;
    }
    if (obj instanceof Binary) {
        return ((Binary) obj).getLength();
    }
    if (obj instanceof Declare) {
        return 7;
    }
    if (obj instanceof Discharge) {
        Discharge discharge = (Discharge) obj;
        return 12 + discharge.getTxnId().getLength();
    }
    if (obj instanceof ServiceBusDescribedType) {
        // FIX: delegate to the described type's own size() so the descriptor and the
        // described value are accounted consistently. The previous inline computation
        // charged a String described value length() bytes, inconsistent with the
        // length() << 1 (UTF-16) accounting used for every other String above.
        return ((ServiceBusDescribedType) obj).size();
    }
    if (obj instanceof Map) {
        // 8 bytes of map overhead plus the recursive size of all keys and values.
        int size = 8;
        Map<?, ?> map = (Map<?, ?>) obj;
        for (Object key : map.keySet()) {
            size += sizeof(key);
        }
        for (Object value : map.values()) {
            size += sizeof(value);
        }
        return size;
    }
    if (obj instanceof Iterable) {
        int size = 8;
        for (Object innerObject : (Iterable<?>) obj) {
            size += sizeof(innerObject);
        }
        return size;
    }
    if (obj.getClass().isArray()) {
        int size = 8;
        int length = Array.getLength(obj);
        for (int i = 0; i < length; i++) {
            size += sizeof(Array.get(obj, i));
        }
        return size;
    }
    throw new IllegalArgumentException(
        String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass()));
}
}
private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; return describedType.size(); } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { amqpMessage.setApplicationProperties(new ApplicationProperties(convertToDescribedType(brokeredMessage.getApplicationProperties()))); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); 
amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } 
amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } private Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.URI_SYMBOL, ((URI) value).toString())); } else if (value instanceof OffsetDateTime) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.OFFSETDATETIME_SYMBOL, ((OffsetDateTime) value).atZoneSameInstant(ZoneOffset.UTC).toInstant().toEpochMilli())); } else if (value instanceof Duration) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.DURATION_SYMBOL, ((Duration) value).toNanos())); } } return propertiesValue; } private Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { DescribedType describedType = (DescribedType) value; if (ServiceBusConstants.URI_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), URI.create((String) describedType.getDescribed())); } else if (ServiceBusConstants.OFFSETDATETIME_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), OffsetDateTime.ofInstant(Instant.ofEpochMilli((long) describedType.getDescribed()), ZoneId.systemDefault())); } else if (ServiceBusConstants.DURATION_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), Duration.ofNanos((long) describedType.getDescribed())); } } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' 
cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> 
date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. 
Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = applicationProperties.getValue(); 
brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(convertToOriginType(propertiesValue)); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { 
brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } 
amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { 
deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * Convert specific type to described type for sending on the wire. * @param propertiesValue application properties set by user which may contain specific type. * @return Map only contains primitive type and described type. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { entry.setValue(new UriDescribedType((URI) value)); } else if (value instanceof OffsetDateTime) { entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value)); } else if (value instanceof Duration) { entry.setValue(new DurationDescribedType((Duration) value)); } } return propertiesValue; } /** * Convert described type to origin type. * @param propertiesValue application properties from amqp message may contain described type. * @return Map without described type. 
*/ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { entry.setValue(MessageUtils.describedToOrigin((DescribedType) value)); } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return 
Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. 
Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); 
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { 
brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } 
@SuppressWarnings("rawtypes") }
Should this be `<< 1` or `* 2`? Is there a specific reason for using the bit shift here, or would plain multiplication be clearer?
/**
 * Best-effort estimate of the encoded size, in bytes, of a single AMQP data value.
 * Recurses into maps, iterables, and arrays; each container contributes a fixed
 * 8-byte overhead plus the size of its contents.
 *
 * @param obj the value to measure; {@code null} counts as 0 bytes.
 * @return the estimated size in bytes.
 * @throws IllegalArgumentException if the value's type is not a supported encoding type.
 */
private static int sizeof(Object obj) {
    if (obj == null) {
        return 0;
    }
    // Strings and symbols are counted at 2 bytes per char ('<< 1' == '* 2');
    // presumably a UTF-16 in-memory estimate -- TODO confirm intent vs. wire size.
    if (obj instanceof String) {
        return ((String) obj).length() << 1;
    }
    if (obj instanceof Symbol) {
        return ((Symbol) obj).length() << 1;
    }
    if (obj instanceof Byte || obj instanceof UnsignedByte) {
        return Byte.BYTES;
    }
    if (obj instanceof Integer || obj instanceof UnsignedInteger) {
        return Integer.BYTES;
    }
    if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) {
        return Long.BYTES;
    }
    if (obj instanceof Short || obj instanceof UnsignedShort) {
        return Short.BYTES;
    }
    if (obj instanceof Boolean) {
        return 1;
    }
    if (obj instanceof Character) {
        return 4;
    }
    if (obj instanceof Float) {
        return Float.BYTES;
    }
    if (obj instanceof Double) {
        return Double.BYTES;
    }
    if (obj instanceof UUID) {
        // A UUID is 128 bits.
        return 16;
    }
    if (obj instanceof Decimal32) {
        return 4;
    }
    if (obj instanceof Decimal64) {
        return 8;
    }
    if (obj instanceof Decimal128) {
        return 16;
    }
    if (obj instanceof Binary) {
        return ((Binary) obj).getLength();
    }
    if (obj instanceof Declare) {
        return 7;
    }
    if (obj instanceof Discharge) {
        Discharge discharge = (Discharge) obj;
        return 12 + discharge.getTxnId().getLength();
    }
    if (obj instanceof ServiceBusDescribedType) {
        ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj;
        // Descriptor is a Symbol: 2 bytes per char.
        int size = ((Symbol) describedType.getDescriptor()).length() << 1;
        // NOTE(review): a String described value is counted at 1 byte per char here,
        // unlike the descriptor above -- confirm whether that asymmetry is intended.
        if (describedType.getDescribed() instanceof String) {
            size += ((String) describedType.getDescribed()).length();
        } else if (describedType.getDescribed() instanceof Long) {
            size += Long.BYTES;
        }
        return size;
    }
    if (obj instanceof Map) {
        // 8-byte container overhead plus every key and every value.
        int size = 8;
        for (Map.Entry<?, ?> entry : ((Map<?, ?>) obj).entrySet()) {
            size += sizeof(entry.getKey());
            size += sizeof(entry.getValue());
        }
        return size;
    }
    if (obj instanceof Iterable) {
        int size = 8;
        for (Object element : (Iterable<?>) obj) {
            size += sizeof(element);
        }
        return size;
    }
    if (obj.getClass().isArray()) {
        int size = 8;
        int length = Array.getLength(obj);
        for (int i = 0; i < length; i++) {
            size += sizeof(Array.get(obj, i));
        }
        return size;
    }
    throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported",
        obj.getClass()));
}
for (Object value : map.values()) {
/**
 * Best-effort estimate of the encoded size, in bytes, of a single AMQP data value.
 * Recurses into maps, iterables, and arrays; each container contributes a fixed
 * 8-byte overhead plus the size of its contents.
 *
 * @param obj the value to measure; {@code null} counts as 0 bytes.
 * @return the estimated size in bytes.
 * @throws IllegalArgumentException if the value's type is not a supported encoding type.
 */
private static int sizeof(Object obj) {
    if (obj == null) {
        return 0;
    }
    // Strings and symbols are counted at 2 bytes per char ('<< 1' == '* 2');
    // presumably a UTF-16 in-memory estimate -- TODO confirm intent vs. wire size.
    if (obj instanceof String) {
        return ((String) obj).length() << 1;
    }
    if (obj instanceof Symbol) {
        return ((Symbol) obj).length() << 1;
    }
    if (obj instanceof Byte || obj instanceof UnsignedByte) {
        return Byte.BYTES;
    }
    if (obj instanceof Integer || obj instanceof UnsignedInteger) {
        return Integer.BYTES;
    }
    if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) {
        return Long.BYTES;
    }
    if (obj instanceof Short || obj instanceof UnsignedShort) {
        return Short.BYTES;
    }
    if (obj instanceof Boolean) {
        return 1;
    }
    if (obj instanceof Character) {
        return 4;
    }
    if (obj instanceof Float) {
        return Float.BYTES;
    }
    if (obj instanceof Double) {
        return Double.BYTES;
    }
    if (obj instanceof UUID) {
        // A UUID is 128 bits.
        return 16;
    }
    if (obj instanceof Decimal32) {
        return 4;
    }
    if (obj instanceof Decimal64) {
        return 8;
    }
    if (obj instanceof Decimal128) {
        return 16;
    }
    if (obj instanceof Binary) {
        return ((Binary) obj).getLength();
    }
    if (obj instanceof Declare) {
        return 7;
    }
    if (obj instanceof Discharge) {
        Discharge discharge = (Discharge) obj;
        return 12 + discharge.getTxnId().getLength();
    }
    if (obj instanceof ServiceBusDescribedType) {
        // Described types know their own encoded size.
        return ((ServiceBusDescribedType) obj).size();
    }
    if (obj instanceof Map) {
        // 8-byte container overhead plus every key and every value.
        int size = 8;
        for (Map.Entry<?, ?> entry : ((Map<?, ?>) obj).entrySet()) {
            size += sizeof(entry.getKey());
            size += sizeof(entry.getValue());
        }
        return size;
    }
    if (obj instanceof Iterable) {
        int size = 8;
        for (Object element : (Iterable<?>) obj) {
            size += sizeof(element);
        }
        return size;
    }
    if (obj.getClass().isArray()) {
        int size = 8;
        int length = Array.getLength(obj);
        for (int i = 0; i < length; i++) {
            size += sizeof(Array.get(obj, i));
        }
        return size;
    }
    throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported",
        obj.getClass()));
}
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { amqpMessage.setApplicationProperties(new ApplicationProperties(convertToDescribedType(brokeredMessage.getApplicationProperties()))); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); 
amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } 
amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } private Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.URI_SYMBOL, ((URI) value).toString())); } else if (value instanceof OffsetDateTime) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.OFFSETDATETIME_SYMBOL, ((OffsetDateTime) value).atZoneSameInstant(ZoneOffset.UTC).toInstant().toEpochMilli())); } else if (value instanceof Duration) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.DURATION_SYMBOL, ((Duration) value).toNanos())); } } return propertiesValue; } private Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { DescribedType describedType = (DescribedType) value; if (ServiceBusConstants.URI_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), URI.create((String) describedType.getDescribed())); } else if (ServiceBusConstants.OFFSETDATETIME_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), OffsetDateTime.ofInstant(Instant.ofEpochMilli((long) describedType.getDescribed()), ZoneId.systemDefault())); } else if (ServiceBusConstants.DURATION_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), Duration.ofNanos((long) describedType.getDescribed())); } } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' 
cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> 
date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. 
Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = applicationProperties.getValue(); 
brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(convertToOriginType(propertiesValue)); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { 
brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } 
amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { 
deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * Convert specific type to described type for sending on the wire. * @param propertiesValue application properties set by user which may contain specific type. * @return Map only contains primitive type and described type. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { entry.setValue(new UriDescribedType((URI) value)); } else if (value instanceof OffsetDateTime) { entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value)); } else if (value instanceof Duration) { entry.setValue(new DurationDescribedType((Duration) value)); } } return propertiesValue; } /** * Convert described type to origin type. * @param propertiesValue application properties from amqp message may contain described type. * @return Map without described type. 
*/ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { entry.setValue(MessageUtils.describedToOrigin((DescribedType) value)); } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return 
Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. 
Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); 
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { 
brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } 
@SuppressWarnings("rawtypes") }
Keep the same code style as former code.
private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; int size = ((Symbol) describedType.getDescriptor()).length() << 1; if (describedType.getDescribed() instanceof String) { size += ((String) describedType.getDescribed()).length(); } else if (describedType.getDescribed() instanceof Long) { size += Long.BYTES; } return size; } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += 
sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); }
for (Object value : map.values()) {
private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; return describedType.size(); } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { amqpMessage.setApplicationProperties(new ApplicationProperties(convertToDescribedType(brokeredMessage.getApplicationProperties()))); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); 
amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } 
amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } private Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.URI_SYMBOL, ((URI) value).toString())); } else if (value instanceof OffsetDateTime) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.OFFSETDATETIME_SYMBOL, ((OffsetDateTime) value).atZoneSameInstant(ZoneOffset.UTC).toInstant().toEpochMilli())); } else if (value instanceof Duration) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.DURATION_SYMBOL, ((Duration) value).toNanos())); } } return propertiesValue; } private Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { DescribedType describedType = (DescribedType) value; if (ServiceBusConstants.URI_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), URI.create((String) describedType.getDescribed())); } else if (ServiceBusConstants.OFFSETDATETIME_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), OffsetDateTime.ofInstant(Instant.ofEpochMilli((long) describedType.getDescribed()), ZoneId.systemDefault())); } else if (ServiceBusConstants.DURATION_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), Duration.ofNanos((long) describedType.getDescribed())); } } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' 
cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> 
date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. 
Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = applicationProperties.getValue(); 
brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(convertToOriginType(propertiesValue)); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { 
brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } 
amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { 
deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * Convert specific type to described type for sending on the wire. * @param propertiesValue application properties set by user which may contain specific type. * @return Map only contains primitive type and described type. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { entry.setValue(new UriDescribedType((URI) value)); } else if (value instanceof OffsetDateTime) { entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value)); } else if (value instanceof Duration) { entry.setValue(new DurationDescribedType((Duration) value)); } } return propertiesValue; } /** * Convert described type to origin type. * @param propertiesValue application properties from amqp message may contain described type. * @return Map without described type. 
*/ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { entry.setValue(MessageUtils.describedToOrigin((DescribedType) value)); } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return 
Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. 
Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); 
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { 
brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } 
@SuppressWarnings("rawtypes") }
Thanks, Zeija! Just curious: did we verify that this works as expected if we send instances of these three types to Service Bus and then retrieve them from the service?
/**
 * Best-effort estimate of the serialized (AMQP-encoded) size, in bytes, of a value.
 * <p>
 * Character data ({@code String}, {@link Symbol}) is sized as UTF-16, i.e. two bytes per
 * char. Numeric wrappers map to their fixed widths; container types (maps, iterables,
 * arrays) start from an 8-byte overhead and add the recursive size of every element.
 * NOTE(review): the fixed constants (8-byte container overhead, 7 for Declare,
 * 12 + txn-id for Discharge) mirror the AMQP encoding assumptions used elsewhere in
 * this serializer — confirm against the AMQP 1.0 type encoding if they are changed.
 *
 * @param obj the value to size; {@code null} contributes 0 bytes.
 * @return the estimated encoded size in bytes.
 * @throws IllegalArgumentException if the runtime type is not a supported encoding type.
 */
private static int sizeof(Object obj) {
    if (obj == null) {
        return 0;
    }
    // Character data is sized as UTF-16: two bytes per char.
    if (obj instanceof String) {
        return obj.toString().length() << 1;
    }
    if (obj instanceof Symbol) {
        return ((Symbol) obj).length() << 1;
    }
    // Signed and unsigned variants of a numeric type share the same width.
    if (obj instanceof Byte || obj instanceof UnsignedByte) {
        return Byte.BYTES;
    }
    if (obj instanceof Integer || obj instanceof UnsignedInteger) {
        return Integer.BYTES;
    }
    if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) {
        return Long.BYTES;
    }
    if (obj instanceof Short || obj instanceof UnsignedShort) {
        return Short.BYTES;
    }
    if (obj instanceof Boolean) {
        return 1;
    }
    if (obj instanceof Character) {
        return 4;
    }
    if (obj instanceof Float) {
        return Float.BYTES;
    }
    if (obj instanceof Double) {
        return Double.BYTES;
    }
    if (obj instanceof UUID) {
        // A UUID is two longs (128 bits).
        return 16;
    }
    if (obj instanceof Decimal32) {
        return 4;
    }
    if (obj instanceof Decimal64) {
        return 8;
    }
    if (obj instanceof Decimal128) {
        return 16;
    }
    if (obj instanceof Binary) {
        return ((Binary) obj).getLength();
    }
    if (obj instanceof Declare) {
        return 7;
    }
    if (obj instanceof Discharge) {
        Discharge discharge = (Discharge) obj;
        return 12 + discharge.getTxnId().getLength();
    }
    if (obj instanceof ServiceBusDescribedType) {
        ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj;
        // The descriptor is a Symbol: sized like other character data, two bytes per char.
        int size = ((Symbol) describedType.getDescriptor()).length() << 1;
        if (describedType.getDescribed() instanceof String) {
            // BUG FIX: previously added length() (one byte per char), undercounting the
            // payload. String payloads are sized as UTF-16 everywhere else in this
            // method, so the described String must also be counted at two bytes per char.
            size += ((String) describedType.getDescribed()).length() << 1;
        } else if (describedType.getDescribed() instanceof Long) {
            size += Long.BYTES;
        }
        return size;
    }
    if (obj instanceof Map) {
        // 8 bytes of fixed overhead plus the recursive size of every key and value.
        int size = 8;
        Map map = (Map) obj;
        for (Object value : map.keySet()) {
            size += sizeof(value);
        }
        for (Object value : map.values()) {
            size += sizeof(value);
        }
        return size;
    }
    if (obj instanceof Iterable) {
        // 8 bytes of fixed overhead plus the recursive size of every element.
        int size = 8;
        for (Object innerObject : (Iterable) obj) {
            size += sizeof(innerObject);
        }
        return size;
    }
    if (obj.getClass().isArray()) {
        // Use reflection so primitive arrays (long[], int[], ...) are handled too.
        int size = 8;
        int length = Array.getLength(obj);
        for (int i = 0; i < length; i++) {
            size += sizeof(Array.get(obj, i));
        }
        return size;
    }
    throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported",
        obj.getClass()));
}
}
/**
 * Estimates how many bytes {@code value} would occupy when AMQP-encoded.
 * <p>
 * {@code String} and {@link Symbol} values are counted as UTF-16 (two bytes per char);
 * numeric wrappers use their fixed widths; described types report their own size via
 * {@code ServiceBusDescribedType#size()}; and container types (maps, iterables, arrays)
 * contribute an 8-byte base plus the recursive size of each contained element.
 *
 * @param value the value to size; {@code null} counts as 0 bytes.
 * @return the estimated encoded size in bytes.
 * @throws IllegalArgumentException if the runtime type is not a supported encoding type.
 */
private static int sizeof(Object value) {
    if (value == null) {
        return 0;
    }
    // Character data: two bytes per UTF-16 code unit.
    if (value instanceof String) {
        return value.toString().length() << 1;
    }
    if (value instanceof Symbol) {
        return ((Symbol) value).length() << 1;
    }
    // Fixed-width numerics; signed and unsigned variants share a width.
    if (value instanceof Byte || value instanceof UnsignedByte) {
        return Byte.BYTES;
    }
    if (value instanceof Integer || value instanceof UnsignedInteger) {
        return Integer.BYTES;
    }
    if (value instanceof Long || value instanceof UnsignedLong || value instanceof Date) {
        return Long.BYTES;
    }
    if (value instanceof Short || value instanceof UnsignedShort) {
        return Short.BYTES;
    }
    if (value instanceof Boolean) {
        return 1;
    }
    if (value instanceof Character) {
        return 4;
    }
    if (value instanceof Float) {
        return Float.BYTES;
    }
    if (value instanceof Double) {
        return Double.BYTES;
    }
    if (value instanceof UUID) {
        return 16;
    }
    if (value instanceof Decimal32) {
        return 4;
    }
    if (value instanceof Decimal64) {
        return 8;
    }
    if (value instanceof Decimal128) {
        return 16;
    }
    if (value instanceof Binary) {
        return ((Binary) value).getLength();
    }
    if (value instanceof Declare) {
        return 7;
    }
    if (value instanceof Discharge) {
        return 12 + ((Discharge) value).getTxnId().getLength();
    }
    // Described types know their own encoded size.
    if (value instanceof ServiceBusDescribedType) {
        return ((ServiceBusDescribedType) value).size();
    }
    if (value instanceof Map) {
        // 8-byte base plus every key and every value, in one pass over the entries.
        int total = 8;
        for (Object entryObject : ((Map) value).entrySet()) {
            Map.Entry entry = (Map.Entry) entryObject;
            total += sizeof(entry.getKey()) + sizeof(entry.getValue());
        }
        return total;
    }
    if (value instanceof Iterable) {
        int total = 8;
        for (Object element : (Iterable) value) {
            total += sizeof(element);
        }
        return total;
    }
    if (value.getClass().isArray()) {
        // Reflection covers primitive arrays (long[], int[], ...) as well as Object[].
        int total = 8;
        int count = Array.getLength(value);
        for (int index = 0; index < count; index++) {
            total += sizeof(Array.get(value, index));
        }
        return total;
    }
    throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported",
        value.getClass()));
}
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { amqpMessage.setApplicationProperties(new ApplicationProperties(convertToDescribedType(brokeredMessage.getApplicationProperties()))); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); 
amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } 
amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } private Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.URI_SYMBOL, ((URI) value).toString())); } else if (value instanceof OffsetDateTime) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.OFFSETDATETIME_SYMBOL, ((OffsetDateTime) value).atZoneSameInstant(ZoneOffset.UTC).toInstant().toEpochMilli())); } else if (value instanceof Duration) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.DURATION_SYMBOL, ((Duration) value).toNanos())); } } return propertiesValue; } private Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { DescribedType describedType = (DescribedType) value; if (ServiceBusConstants.URI_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), URI.create((String) describedType.getDescribed())); } else if (ServiceBusConstants.OFFSETDATETIME_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), OffsetDateTime.ofInstant(Instant.ofEpochMilli((long) describedType.getDescribed()), ZoneId.systemDefault())); } else if (ServiceBusConstants.DURATION_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), Duration.ofNanos((long) describedType.getDescribed())); } } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' 
cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> 
date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. 
Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = applicationProperties.getValue(); 
brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(convertToOriginType(propertiesValue)); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { 
brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } 
amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { 
deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * Convert specific type to described type for sending on the wire. * @param propertiesValue application properties set by user which may contain specific type. * @return Map only contains primitive type and described type. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { entry.setValue(new UriDescribedType((URI) value)); } else if (value instanceof OffsetDateTime) { entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value)); } else if (value instanceof Duration) { entry.setValue(new DurationDescribedType((Duration) value)); } } return propertiesValue; } /** * Convert described type to origin type. * @param propertiesValue application properties from amqp message may contain described type. * @return Map without described type. 
*/ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { entry.setValue(MessageUtils.describedToOrigin((DescribedType) value)); } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return 
Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. 
Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); 
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { 
brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } 
@SuppressWarnings("rawtypes") }
> Thanks, Zeija! Just curious — did we verify that it works as expected if we send instances of these three types to Service Bus and then retrieve them from the service? Yeah, I checked these three types several times in my local environment, but I'm not sure about all edge cases.
/**
 * Best-effort estimate, in bytes, of the encoded size of an AMQP data element.
 * Strings and symbols are counted as UTF-16 (two bytes per char); maps, iterables,
 * and arrays add an 8-byte structural overhead plus the recursive size of their
 * elements.
 *
 * @param obj the value to measure; {@code null} counts as 0 bytes.
 * @return the estimated encoded size in bytes.
 * @throws IllegalArgumentException if {@code obj} is of an unsupported type.
 */
private static int sizeof(Object obj) {
    if (obj == null) {
        return 0;
    }

    if (obj instanceof String) {
        return obj.toString().length() << 1;
    }

    if (obj instanceof Symbol) {
        return ((Symbol) obj).length() << 1;
    }

    if (obj instanceof Byte || obj instanceof UnsignedByte) {
        return Byte.BYTES;
    }

    if (obj instanceof Integer || obj instanceof UnsignedInteger) {
        return Integer.BYTES;
    }

    if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) {
        return Long.BYTES;
    }

    if (obj instanceof Short || obj instanceof UnsignedShort) {
        return Short.BYTES;
    }

    if (obj instanceof Boolean) {
        return 1;
    }

    if (obj instanceof Character) {
        return 4;
    }

    if (obj instanceof Float) {
        return Float.BYTES;
    }

    if (obj instanceof Double) {
        return Double.BYTES;
    }

    if (obj instanceof UUID) {
        return 16;
    }

    if (obj instanceof Decimal32) {
        return 4;
    }

    if (obj instanceof Decimal64) {
        return 8;
    }

    if (obj instanceof Decimal128) {
        return 16;
    }

    if (obj instanceof Binary) {
        return ((Binary) obj).getLength();
    }

    if (obj instanceof Declare) {
        return 7;
    }

    if (obj instanceof Discharge) {
        Discharge discharge = (Discharge) obj;
        return 12 + discharge.getTxnId().getLength();
    }

    if (obj instanceof ServiceBusDescribedType) {
        // Delegate to the described type's own size computation instead of duplicating it
        // inline. The previous inline version also under-counted String payloads: it used
        // length() rather than length() << 1 like every other string estimate in this method.
        return ((ServiceBusDescribedType) obj).size();
    }

    if (obj instanceof Map) {
        int size = 8;
        Map map = (Map) obj;
        for (Object value : map.keySet()) {
            size += sizeof(value);
        }

        for (Object value : map.values()) {
            size += sizeof(value);
        }

        return size;
    }

    if (obj instanceof Iterable) {
        int size = 8;
        for (Object innerObject : (Iterable) obj) {
            size += sizeof(innerObject);
        }

        return size;
    }

    if (obj.getClass().isArray()) {
        int size = 8;
        int length = Array.getLength(obj);
        for (int i = 0; i < length; i++) {
            size += sizeof(Array.get(obj, i));
        }

        return size;
    }

    throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported",
        obj.getClass()));
}
}
/**
 * Approximates the number of bytes {@code obj} occupies once encoded as an AMQP data
 * element. Character data (strings, symbols) is counted at two bytes per char; container
 * types (maps, iterables, arrays) contribute an 8-byte structural overhead plus the
 * recursively computed size of every element.
 *
 * @param obj the value to measure; {@code null} contributes 0 bytes.
 * @return the estimated encoded size in bytes.
 * @throws IllegalArgumentException when the runtime type is not one this estimator knows.
 */
private static int sizeof(Object obj) {
    if (obj == null) {
        return 0;
    }

    // Character data: two bytes per UTF-16 code unit.
    if (obj instanceof String) {
        return ((String) obj).length() << 1;
    }
    if (obj instanceof Symbol) {
        return ((Symbol) obj).length() << 1;
    }

    // Fixed-width scalars.
    if (obj instanceof Byte || obj instanceof UnsignedByte) {
        return Byte.BYTES;
    }
    if (obj instanceof Integer || obj instanceof UnsignedInteger) {
        return Integer.BYTES;
    }
    if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) {
        return Long.BYTES;
    }
    if (obj instanceof Short || obj instanceof UnsignedShort) {
        return Short.BYTES;
    }
    if (obj instanceof Boolean) {
        return 1;
    }
    if (obj instanceof Character) {
        return 4;
    }
    if (obj instanceof Float) {
        return Float.BYTES;
    }
    if (obj instanceof Double) {
        return Double.BYTES;
    }
    if (obj instanceof UUID) {
        return 16;
    }
    if (obj instanceof Decimal32) {
        return 4;
    }
    if (obj instanceof Decimal64) {
        return 8;
    }
    if (obj instanceof Decimal128) {
        return 16;
    }

    // Opaque payloads and transaction frames.
    if (obj instanceof Binary) {
        return ((Binary) obj).getLength();
    }
    if (obj instanceof Declare) {
        return 7;
    }
    if (obj instanceof Discharge) {
        return 12 + ((Discharge) obj).getTxnId().getLength();
    }

    // Described types know their own encoded size.
    if (obj instanceof ServiceBusDescribedType) {
        return ((ServiceBusDescribedType) obj).size();
    }

    // Containers: 8-byte overhead plus recursive element sizes.
    if (obj instanceof Map) {
        int total = 8;
        for (Object entry : ((Map) obj).entrySet()) {
            Map.Entry pair = (Map.Entry) entry;
            total += sizeof(pair.getKey());
            total += sizeof(pair.getValue());
        }
        return total;
    }
    if (obj instanceof Iterable) {
        int total = 8;
        for (Object element : (Iterable) obj) {
            total += sizeof(element);
        }
        return total;
    }
    if (obj.getClass().isArray()) {
        int total = 8;
        final int count = Array.getLength(obj);
        for (int index = 0; index < count; index++) {
            total += sizeof(Array.get(obj, index));
        }
        return total;
    }

    throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported",
        obj.getClass()));
}
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { amqpMessage.setApplicationProperties(new ApplicationProperties(convertToDescribedType(brokeredMessage.getApplicationProperties()))); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); 
amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } 
amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } private Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.URI_SYMBOL, ((URI) value).toString())); } else if (value instanceof OffsetDateTime) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.OFFSETDATETIME_SYMBOL, ((OffsetDateTime) value).atZoneSameInstant(ZoneOffset.UTC).toInstant().toEpochMilli())); } else if (value instanceof Duration) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.DURATION_SYMBOL, ((Duration) value).toNanos())); } } return propertiesValue; } private Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { DescribedType describedType = (DescribedType) value; if (ServiceBusConstants.URI_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), URI.create((String) describedType.getDescribed())); } else if (ServiceBusConstants.OFFSETDATETIME_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), OffsetDateTime.ofInstant(Instant.ofEpochMilli((long) describedType.getDescribed()), ZoneId.systemDefault())); } else if (ServiceBusConstants.DURATION_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), Duration.ofNanos((long) describedType.getDescribed())); } } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' 
cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> 
date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. 
Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = applicationProperties.getValue(); 
brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(convertToOriginType(propertiesValue)); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { 
brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } 
amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { 
deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * Convert specific type to described type for sending on the wire. * @param propertiesValue application properties set by user which may contain specific type. * @return Map only contains primitive type and described type. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { entry.setValue(new UriDescribedType((URI) value)); } else if (value instanceof OffsetDateTime) { entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value)); } else if (value instanceof Duration) { entry.setValue(new DurationDescribedType((Duration) value)); } } return propertiesValue; } /** * Convert described type to origin type. * @param propertiesValue application properties from amqp message may contain described type. * @return Map without described type. 
*/ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { entry.setValue(MessageUtils.describedToOrigin((DescribedType) value)); } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return 
Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. 
Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); 
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { 
brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } 
@SuppressWarnings("rawtypes") }
@ZejiaJiang My guess is you are correct about the size, but you should verify it against the AMQP spec rather than assume. What I'm curious about is why a String needs `length * 2`: I assume AMQP encodes strings as UTF-8 (variable-width, 1–3 bytes per Java `char`), in which case `length * 2` is only an estimate, not an exact encoded size. Worth confirming which the spec actually mandates.
/**
 * Best-effort estimate of the encoded size, in bytes, of an AMQP data value.
 * <p>
 * The per-type constants approximate AMQP 1.0 type encodings; they are estimates used for
 * size accounting, not exact wire lengths (e.g. strings are counted at 2 bytes per char even
 * though AMQP encodes strings as UTF-8 — TODO confirm against the AMQP spec).
 *
 * @param obj value to size; {@code null} counts as 0 bytes.
 * @return estimated encoded size in bytes.
 * @throws IllegalArgumentException if {@code obj} is of a type this estimator does not support.
 */
private static int sizeof(Object obj) {
    if (obj == null) {
        return 0;
    }
    if (obj instanceof String) {
        // 2 bytes per char; AMQP strings are UTF-8 so this is an upper-ish estimate — TODO confirm.
        return obj.toString().length() << 1;
    }
    if (obj instanceof Symbol) {
        return ((Symbol) obj).length() << 1;
    }
    if (obj instanceof Byte || obj instanceof UnsignedByte) {
        return Byte.BYTES;
    }
    if (obj instanceof Integer || obj instanceof UnsignedInteger) {
        return Integer.BYTES;
    }
    if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) {
        return Long.BYTES;
    }
    if (obj instanceof Short || obj instanceof UnsignedShort) {
        return Short.BYTES;
    }
    if (obj instanceof Boolean) {
        return 1;
    }
    if (obj instanceof Character) {
        // AMQP char is presumed a 4-byte code point — verify against spec.
        return 4;
    }
    if (obj instanceof Float) {
        return Float.BYTES;
    }
    if (obj instanceof Double) {
        return Double.BYTES;
    }
    if (obj instanceof UUID) {
        return 16;
    }
    if (obj instanceof Decimal32) {
        return 4;
    }
    if (obj instanceof Decimal64) {
        return 8;
    }
    if (obj instanceof Decimal128) {
        return 16;
    }
    if (obj instanceof Binary) {
        return ((Binary) obj).getLength();
    }
    if (obj instanceof Declare) {
        // Fixed encoding overhead — presumed from the transaction performative encoding; verify.
        return 7;
    }
    if (obj instanceof Discharge) {
        Discharge discharge = (Discharge) obj;
        return 12 + discharge.getTxnId().getLength();
    }
    if (obj instanceof ServiceBusDescribedType) {
        // Delegate to the described type so descriptor and described value are sized
        // consistently. (Previously the described String was counted at 1 byte/char while
        // bare Strings were counted at 2 bytes/char, and only String/Long values were handled.)
        return ((ServiceBusDescribedType) obj).size();
    }
    if (obj instanceof Map) {
        // 8 bytes of presumed map encoding overhead, plus all keys and values.
        int size = 8;
        Map map = (Map) obj;
        for (Object value : map.keySet()) {
            size += sizeof(value);
        }
        for (Object value : map.values()) {
            size += sizeof(value);
        }
        return size;
    }
    if (obj instanceof Iterable) {
        int size = 8;
        for (Object innerObject : (Iterable) obj) {
            size += sizeof(innerObject);
        }
        return size;
    }
    if (obj.getClass().isArray()) {
        int size = 8;
        int length = Array.getLength(obj);
        for (int i = 0; i < length; i++) {
            size += sizeof(Array.get(obj, i));
        }
        return size;
    }
    throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported",
        obj.getClass()));
}
}
/**
 * Best-effort estimate of the encoded size, in bytes, of an AMQP data value.
 * <p>
 * Branch order matters: specific types are tested before the generic {@code Map},
 * {@code Iterable} and array fallbacks, and {@code Map} must precede {@code Iterable}.
 * The per-type byte counts approximate AMQP 1.0 type encodings; they are estimates
 * used for size accounting, not exact wire lengths.
 *
 * @param obj value to size; {@code null} counts as 0 bytes.
 * @return estimated encoded size in bytes.
 * @throws IllegalArgumentException if {@code obj} is of a type this estimator does not support.
 */
private static int sizeof(Object obj) {
    if (obj == null) {
        return 0;
    }
    if (obj instanceof String) {
        // 2 bytes per char; AMQP strings are UTF-8, so this is an estimate — TODO confirm against spec.
        return obj.toString().length() << 1;
    }
    if (obj instanceof Symbol) {
        return ((Symbol) obj).length() << 1;
    }
    if (obj instanceof Byte || obj instanceof UnsignedByte) {
        return Byte.BYTES;
    }
    if (obj instanceof Integer || obj instanceof UnsignedInteger) {
        return Integer.BYTES;
    }
    if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) {
        return Long.BYTES;
    }
    if (obj instanceof Short || obj instanceof UnsignedShort) {
        return Short.BYTES;
    }
    if (obj instanceof Boolean) {
        return 1;
    }
    if (obj instanceof Character) {
        // Presumed 4-byte code-point encoding — verify against AMQP spec.
        return 4;
    }
    if (obj instanceof Float) {
        return Float.BYTES;
    }
    if (obj instanceof Double) {
        return Double.BYTES;
    }
    if (obj instanceof UUID) {
        return 16;
    }
    if (obj instanceof Decimal32) {
        return 4;
    }
    if (obj instanceof Decimal64) {
        return 8;
    }
    if (obj instanceof Decimal128) {
        return 16;
    }
    if (obj instanceof Binary) {
        return ((Binary) obj).getLength();
    }
    if (obj instanceof Declare) {
        // Fixed overhead constant — presumed from the transaction performative encoding; verify.
        return 7;
    }
    if (obj instanceof Discharge) {
        Discharge discharge = (Discharge) obj;
        return 12 + discharge.getTxnId().getLength();
    }
    if (obj instanceof ServiceBusDescribedType) {
        // The described type knows its own encoded size (descriptor + described value).
        ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj;
        return describedType.size();
    }
    if (obj instanceof Map) {
        // 8 bytes of presumed container overhead, plus every key and every value.
        int size = 8;
        Map map = (Map) obj;
        for (Object value : map.keySet()) {
            size += sizeof(value);
        }
        for (Object value : map.values()) {
            size += sizeof(value);
        }
        return size;
    }
    if (obj instanceof Iterable) {
        // 8 bytes of presumed container overhead, plus each element.
        int size = 8;
        for (Object innerObject : (Iterable) obj) {
            size += sizeof(innerObject);
        }
        return size;
    }
    if (obj.getClass().isArray()) {
        // Arrays are sized like lists: 8 bytes overhead plus each element (via reflection).
        int size = 8;
        int length = Array.getLength(obj);
        for (int i = 0; i < length; i++) {
            size += sizeof(Array.get(obj, i));
        }
        return size;
    }
    throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported",
        obj.getClass()));
}
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { amqpMessage.setApplicationProperties(new ApplicationProperties(convertToDescribedType(brokeredMessage.getApplicationProperties()))); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); 
amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } 
amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } private Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.URI_SYMBOL, ((URI) value).toString())); } else if (value instanceof OffsetDateTime) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.OFFSETDATETIME_SYMBOL, ((OffsetDateTime) value).atZoneSameInstant(ZoneOffset.UTC).toInstant().toEpochMilli())); } else if (value instanceof Duration) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.DURATION_SYMBOL, ((Duration) value).toNanos())); } } return propertiesValue; } private Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { DescribedType describedType = (DescribedType) value; if (ServiceBusConstants.URI_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), URI.create((String) describedType.getDescribed())); } else if (ServiceBusConstants.OFFSETDATETIME_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), OffsetDateTime.ofInstant(Instant.ofEpochMilli((long) describedType.getDescribed()), ZoneId.systemDefault())); } else if (ServiceBusConstants.DURATION_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), Duration.ofNanos((long) describedType.getDescribed())); } } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' 
cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> 
date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. 
Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = applicationProperties.getValue(); 
brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(convertToOriginType(propertiesValue)); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { 
brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } 
amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { 
deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * Convert specific type to described type for sending on the wire. * @param propertiesValue application properties set by user which may contain specific type. * @return Map only contains primitive type and described type. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { entry.setValue(new UriDescribedType((URI) value)); } else if (value instanceof OffsetDateTime) { entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value)); } else if (value instanceof Duration) { entry.setValue(new DurationDescribedType((Duration) value)); } } return propertiesValue; } /** * Convert described type to origin type. * @param propertiesValue application properties from amqp message may contain described type. * @return Map without described type. 
*/ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { entry.setValue(MessageUtils.describedToOrigin((DescribedType) value)); } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return 
Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. 
Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); 
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { 
brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } 
@SuppressWarnings("rawtypes") }
> @ZejiaJiang My guess is that you are correct about the size, but you need to confirm it. > > What I'm curious about is why a String needs `length * 2`. I assume AMQP uses UTF-8, but I'm not sure. Yeah, I've asked Connie about the size method; she told me that it was copied from the legacy library. I'll check the legacy library and find some docs to confirm this approach to calculating the size.
/**
 * Best-effort estimate, in bytes, of the AMQP-encoded size of {@code obj}. Strings and
 * Symbols are counted at two bytes per char (UTF-16 code units); maps, iterables, and
 * arrays add an 8-byte overhead plus the recursive size of their elements.
 *
 * @param obj Value to measure; null counts as 0 bytes.
 * @return Estimated encoded size in bytes.
 * @throws IllegalArgumentException for types with no known encoding size.
 */
private static int sizeof(Object obj) {
    if (obj == null) {
        return 0;
    }
    if (obj instanceof String) {
        return obj.toString().length() << 1;
    }
    if (obj instanceof Symbol) {
        return ((Symbol) obj).length() << 1;
    }
    if (obj instanceof Byte || obj instanceof UnsignedByte) {
        return Byte.BYTES;
    }
    if (obj instanceof Integer || obj instanceof UnsignedInteger) {
        return Integer.BYTES;
    }
    if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) {
        return Long.BYTES;
    }
    if (obj instanceof Short || obj instanceof UnsignedShort) {
        return Short.BYTES;
    }
    if (obj instanceof Boolean) {
        return 1;
    }
    if (obj instanceof Character) {
        return 4;
    }
    if (obj instanceof Float) {
        return Float.BYTES;
    }
    if (obj instanceof Double) {
        return Double.BYTES;
    }
    if (obj instanceof UUID) {
        return 16;
    }
    if (obj instanceof Decimal32) {
        return 4;
    }
    if (obj instanceof Decimal64) {
        return 8;
    }
    if (obj instanceof Decimal128) {
        return 16;
    }
    if (obj instanceof Binary) {
        return ((Binary) obj).getLength();
    }
    if (obj instanceof Declare) {
        return 7;
    }
    if (obj instanceof Discharge) {
        Discharge discharge = (Discharge) obj;
        return 12 + discharge.getTxnId().getLength();
    }
    if (obj instanceof ServiceBusDescribedType) {
        // FIX: the original hand-computed this as descriptor length * 2 plus an UNDOUBLED
        // String payload length (inconsistent with every other string in this method) and
        // silently ignored payload types other than String/Long. Delegate to the type's own
        // size() so the estimate stays consistent with the encoder.
        ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj;
        return describedType.size();
    }
    if (obj instanceof Map) {
        int size = 8;
        Map map = (Map) obj;
        for (Object value : map.keySet()) {
            size += sizeof(value);
        }
        for (Object value : map.values()) {
            size += sizeof(value);
        }
        return size;
    }
    if (obj instanceof Iterable) {
        int size = 8;
        for (Object innerObject : (Iterable) obj) {
            size += sizeof(innerObject);
        }
        return size;
    }
    if (obj.getClass().isArray()) {
        int size = 8;
        int length = Array.getLength(obj);
        for (int i = 0; i < length; i++) {
            size += sizeof(Array.get(obj, i));
        }
        return size;
    }
    throw new IllegalArgumentException(String.format(Locale.US,
        "Encoding Type: %s is not supported", obj.getClass()));
}
}
/**
 * Estimates how many bytes {@code obj} occupies once AMQP-encoded. Character data
 * (String/Symbol) is counted at two bytes per char; container types (Map, Iterable, array)
 * contribute an 8-byte overhead plus the recursive size of every element they hold.
 *
 * @param obj Value to measure; null contributes 0 bytes.
 * @return Estimated encoded size in bytes.
 * @throws IllegalArgumentException when {@code obj} is of a type with no known encoding.
 */
private static int sizeof(Object obj) {
    if (obj == null) {
        return 0;
    } else if (obj instanceof String) {
        return ((String) obj).length() * 2;
    } else if (obj instanceof Symbol) {
        return ((Symbol) obj).length() * 2;
    } else if (obj instanceof Byte || obj instanceof UnsignedByte) {
        return Byte.BYTES;
    } else if (obj instanceof Integer || obj instanceof UnsignedInteger) {
        return Integer.BYTES;
    } else if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) {
        return Long.BYTES;
    } else if (obj instanceof Short || obj instanceof UnsignedShort) {
        return Short.BYTES;
    } else if (obj instanceof Boolean) {
        return 1;
    } else if (obj instanceof Character) {
        return 4;
    } else if (obj instanceof Float) {
        return Float.BYTES;
    } else if (obj instanceof Double) {
        return Double.BYTES;
    } else if (obj instanceof UUID) {
        return 16;
    } else if (obj instanceof Decimal32) {
        return 4;
    } else if (obj instanceof Decimal64) {
        return 8;
    } else if (obj instanceof Decimal128) {
        return 16;
    } else if (obj instanceof Binary) {
        return ((Binary) obj).getLength();
    } else if (obj instanceof Declare) {
        return 7;
    } else if (obj instanceof Discharge) {
        // Fixed framing overhead plus the transaction id payload.
        return 12 + ((Discharge) obj).getTxnId().getLength();
    } else if (obj instanceof ServiceBusDescribedType) {
        // The described type reports its own encoded size (descriptor + payload).
        return ((ServiceBusDescribedType) obj).size();
    } else if (obj instanceof Map) {
        int total = 8;
        for (Object entryObj : ((Map) obj).entrySet()) {
            Map.Entry entry = (Map.Entry) entryObj;
            total += sizeof(entry.getKey());
            total += sizeof(entry.getValue());
        }
        return total;
    } else if (obj instanceof Iterable) {
        int total = 8;
        for (Object element : (Iterable) obj) {
            total += sizeof(element);
        }
        return total;
    } else if (obj.getClass().isArray()) {
        int total = 8;
        final int len = Array.getLength(obj);
        for (int idx = 0; idx < len; idx++) {
            total += sizeof(Array.get(obj, idx));
        }
        return total;
    }
    throw new IllegalArgumentException(String.format(Locale.US,
        "Encoding Type: %s is not supported", obj.getClass()));
}
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { amqpMessage.setApplicationProperties(new ApplicationProperties(convertToDescribedType(brokeredMessage.getApplicationProperties()))); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); 
amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } 
amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } private Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.URI_SYMBOL, ((URI) value).toString())); } else if (value instanceof OffsetDateTime) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.OFFSETDATETIME_SYMBOL, ((OffsetDateTime) value).atZoneSameInstant(ZoneOffset.UTC).toInstant().toEpochMilli())); } else if (value instanceof Duration) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.DURATION_SYMBOL, ((Duration) value).toNanos())); } } return propertiesValue; } private Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { DescribedType describedType = (DescribedType) value; if (ServiceBusConstants.URI_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), URI.create((String) describedType.getDescribed())); } else if (ServiceBusConstants.OFFSETDATETIME_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), OffsetDateTime.ofInstant(Instant.ofEpochMilli((long) describedType.getDescribed()), ZoneId.systemDefault())); } else if (ServiceBusConstants.DURATION_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), Duration.ofNanos((long) describedType.getDescribed())); } } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' 
cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> 
date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. 
Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = applicationProperties.getValue(); 
brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(convertToOriginType(propertiesValue)); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { 
brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") }
/**
 * Translates between the client-facing {@link ServiceBusMessage}/{@link ServiceBusReceivedMessage}
 * types and the proton-j {@link Message} wire representation.
 *
 * NOTE(review): this chunk contains several near-duplicate copies of this class; this is one of them.
 * The helper {@code sizeof(...)} used by {@link #getSize(Message)} is defined elsewhere in the file.
 */
class ServiceBusMessageSerializer implements MessageSerializer {
    // Shared placeholder body used when an AMQP message carries no usable body section.
    private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
    private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class);

    /**
     * Gets the serialized size of the AMQP message.
     *
     * The estimate is the payload size plus the summed sizes of all message-annotation and
     * application-property keys and values. Returns 0 for a {@code null} message.
     */
    @Override
    public int getSize(Message amqpMessage) {
        if (amqpMessage == null) {
            return 0;
        }

        int payloadSize = getPayloadSize(amqpMessage);

        final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations();
        final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties();

        int annotationsSize = 0;
        int applicationPropertiesSize = 0;

        if (messageAnnotations != null) {
            final Map<Symbol, Object> map = messageAnnotations.getValue();
            for (Map.Entry<Symbol, Object> entry : map.entrySet()) {
                // Both the annotation key (Symbol) and its value contribute to the estimate.
                final int size = sizeof(entry.getKey()) + sizeof(entry.getValue());
                annotationsSize += size;
            }
        }

        if (applicationProperties != null) {
            final Map<String, Object> map = applicationProperties.getValue();
            for (Map.Entry<String, Object> entry : map.entrySet()) {
                final int size = sizeof(entry.getKey()) + sizeof(entry.getValue());
                applicationPropertiesSize += size;
            }
        }

        return annotationsSize + applicationPropertiesSize + payloadSize;
    }

    /**
     * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link
     * ServiceBusMessage}.
     *
     * @param object Concrete object to deserialize.
     *
     * @return A new AMQP message for this {@code object}.
     *
     * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}.
     */
    @Override
    public <T> Message serialize(T object) {
        Objects.requireNonNull(object, "'object' to serialize cannot be null.");

        if (!(object instanceof ServiceBusMessage)) {
            throw logger.logExceptionAsError(new IllegalArgumentException(
                "Cannot serialize object that is not ServiceBusMessage. Clazz: " + object.getClass()));
        }

        final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object;
        AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType();
        final Message amqpMessage = Proton.message();
        byte[] body;
        // Map the annotated-message body type onto the corresponding proton-j body section.
        // A null body type is treated as DATA.
        if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) {
            body = brokeredMessage.getBody().toBytes();
            amqpMessage.setBody(new Data(new Binary(body)));
        } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) {
            List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence();
            amqpMessage.setBody(new AmqpSequence(sequenceList));
        } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) {
            amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue()));
        }

        if (brokeredMessage.getApplicationProperties() != null) {
            // URI/OffsetDateTime/Duration values are replaced with AMQP described types before sending.
            Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties());
            amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap));
        }

        if (brokeredMessage.getTimeToLive() != null) {
            amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis());
        }

        if (amqpMessage.getProperties() == null) {
            amqpMessage.setProperties(new Properties());
        }

        amqpMessage.setMessageId(brokeredMessage.getMessageId());
        amqpMessage.setContentType(brokeredMessage.getContentType());
        amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId());
        amqpMessage.setSubject(brokeredMessage.getSubject());
        amqpMessage.setReplyTo(brokeredMessage.getReplyTo());
        amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId());
        amqpMessage.setGroupId(brokeredMessage.getSessionId());

        final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties();
        amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding());
        if (brokeredProperties.getGroupSequence() != null) {
            amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence());
        }
        amqpMessage.getProperties().setTo(brokeredMessage.getTo());
        // NOTE(review): getUserId() is passed to new Binary(...) without a null check —
        // confirm Binary tolerates a null array or that user id is always set upstream.
        amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId()));

        if (brokeredProperties.getAbsoluteExpiryTime() != null) {
            amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime()
                .toInstant()));
        }
        if (brokeredProperties.getCreationTime() != null) {
            amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant()));
        }

        amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter()));

        // Header fields are only written when present so proton-j defaults are preserved.
        AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader();
        if (header.getDeliveryCount() != null) {
            amqpMessage.setDeliveryCount(header.getDeliveryCount());
        }
        if (header.getPriority() != null) {
            amqpMessage.setPriority(header.getPriority());
        }
        if (header.isDurable() != null) {
            amqpMessage.setDurable(header.isDurable());
        }
        if (header.isFirstAcquirer() != null) {
            amqpMessage.setFirstAcquirer(header.isFirstAcquirer());
        }
        if (header.getTimeToLive() != null) {
            // NOTE(review): this overwrites any TTL set earlier from brokeredMessage.getTimeToLive();
            // the header value wins when both are present.
            amqpMessage.setTtl(header.getTimeToLive().toMillis());
        }

        // Service Bus-specific metadata (scheduled enqueue time, partition key) travels in
        // message annotations under well-known symbols.
        final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>();
        if (brokeredMessage.getScheduledEnqueueTime() != null) {
            messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()),
                Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant()));
        }

        final String partitionKey = brokeredMessage.getPartitionKey();
        if (partitionKey != null && !partitionKey.isEmpty()) {
            messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()),
                brokeredMessage.getPartitionKey());
        }

        amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap));

        // Delivery annotations use Symbol keys on the wire; convert from the String-keyed map.
        final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>();
        final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage()
            .getDeliveryAnnotations();
        for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) {
            deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue());
        }

        amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap));

        return amqpMessage;
    }

    /**
     * Convert specific type to described type for sending on the wire.
     *
     * Mutates {@code propertiesValue} in place: URI, OffsetDateTime and Duration values are
     * replaced with their described-type wrappers. All other values are left untouched.
     *
     * @param propertiesValue application properties set by user which may contain specific type.
     * @return Map only contains primitive type and described type (the same map instance).
     */
    private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) {
        for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) {
            Object value = entry.getValue();
            if (value instanceof URI) {
                entry.setValue(new UriDescribedType((URI) value));
            } else if (value instanceof OffsetDateTime) {
                entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value));
            } else if (value instanceof Duration) {
                entry.setValue(new DurationDescribedType((Duration) value));
            }
        }
        return propertiesValue;
    }

    /**
     * Convert described type to origin type. Inverse of {@link #convertToDescribedType(Map)};
     * mutates the map in place.
     *
     * @param propertiesValue application properties from amqp message may contain described type.
     * @return Map without described type (the same map instance).
     */
    private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) {
        for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) {
            Object value = entry.getValue();
            if (value instanceof DescribedType) {
                entry.setValue(MessageUtils.describedToOrigin((DescribedType) value));
            }
        }
        return propertiesValue;
    }

    /**
     * Deserializes an AMQP message into a {@link ServiceBusReceivedMessage}.
     * Only {@code ServiceBusReceivedMessage.class} is supported; anything else throws.
     *
     * @throws IllegalArgumentException when {@code clazz} is not a supported target type.
     */
    @SuppressWarnings("unchecked")
    @Override
    public <T> T deserialize(Message message, Class<T> clazz) {
        Objects.requireNonNull(message, "'message' cannot be null.");
        Objects.requireNonNull(clazz, "'clazz' cannot be null.");

        if (clazz == ServiceBusReceivedMessage.class) {
            return (T) deserializeMessage(message);
        } else {
            throw logger.logExceptionAsError(new IllegalArgumentException(
                String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz)));
        }
    }

    /**
     * Deserializes a management-channel response into a list of the requested element type.
     * Supported element types: ServiceBusReceivedMessage, OffsetDateTime, Long.
     */
    @SuppressWarnings("unchecked")
    @Override
    public <T> List<T> deserializeList(Message message, Class<T> clazz) {
        if (clazz == ServiceBusReceivedMessage.class) {
            return (List<T>) deserializeListOfMessages(message);
        } else if (clazz == OffsetDateTime.class) {
            return (List<T>) deserializeListOfOffsetDateTime(message);
        } else if (clazz == OffsetDateTime.class) {
            // NOTE(review): duplicated OffsetDateTime branch — this arm is unreachable dead code
            // and should be removed.
            return (List<T>) deserializeListOfOffsetDateTime(message);
        } else if (clazz == Long.class) {
            return (List<T>) deserializeListOfLong(message);
        } else {
            throw logger.logExceptionAsError(new IllegalArgumentException(
                String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz)));
        }
    }

    /**
     * Extracts the sequence-number list from a management response body.
     * Returns an empty list when the body is not an AmqpValue map containing a long[]
     * under {@code ManagementConstants.SEQUENCE_NUMBERS}.
     */
    private List<Long> deserializeListOfLong(Message amqpMessage) {
        if (amqpMessage.getBody() instanceof AmqpValue) {
            AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody());
            if (amqpValue.getValue() instanceof Map) {
                @SuppressWarnings("unchecked")
                Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue();
                Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS);
                if (expirationListObj instanceof long[]) {
                    return Arrays.stream((long[]) expirationListObj)
                        .boxed()
                        .collect(Collectors.toList());
                }
            }
        }
        return Collections.emptyList();
    }

    /**
     * Extracts lock-expiration timestamps from a management response body, converting each
     * Date to an OffsetDateTime at UTC. Returns an empty list when the expected shape is absent.
     */
    private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) {
        if (amqpMessage.getBody() instanceof AmqpValue) {
            AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody());
            if (amqpValue.getValue() instanceof Map) {
                @SuppressWarnings("unchecked")
                Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue();
                Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS);
                if (expirationListObj instanceof Date[]) {
                    return Arrays.stream((Date[]) expirationListObj)
                        .map(date -> date.toInstant().atOffset(ZoneOffset.UTC))
                        .collect(Collectors.toList());
                }
            }
        }
        return Collections.emptyList();
    }

    /**
     * Decodes the batch of messages embedded in a management (e.g. peek/receive-deferred)
     * response. Each malformed entry is logged and skipped; an entirely malformed response
     * yields an empty list rather than an exception.
     */
    @SuppressWarnings("rawtypes")
    private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) {
        final List<ServiceBusReceivedMessage> messageList = new ArrayList<>();
        final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage);

        if (statusCode != AmqpResponseCode.OK) {
            logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode);
            return Collections.emptyList();
        }

        // NOTE(review): the body is cast to AmqpValue unconditionally here — a non-AmqpValue body
        // would throw ClassCastException; confirm the management channel guarantees this shape.
        final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue();

        if (responseBodyMap == null) {
            logger.warning("AMQP response did not contain a body.");
            return Collections.emptyList();
        } else if (!(responseBodyMap instanceof Map)) {
            logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}",
                Map.class, responseBodyMap.getClass());
            return Collections.emptyList();
        }

        final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES);
        if (messages == null) {
            logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES);
            return Collections.emptyList();
        } else if (!(messages instanceof Iterable)) {
            logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}",
                Iterable.class, messages.getClass());
            return Collections.emptyList();
        }

        for (Object message : (Iterable) messages) {
            if (!(message instanceof Map)) {
                logger.warning("Message inside iterable of message is not correct type. Expected: {}. Actual: {}",
                    Map.class, message.getClass());
                continue;
            }

            // Each entry carries a fully-encoded AMQP message as a Binary payload; decode it
            // into a fresh proton-j Message before translating.
            final Message responseMessage = Message.Factory.create();
            final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE);
            responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(),
                messagePayLoad.getLength());

            final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage);
            if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) {
                receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY));
            }

            messageList.add(receivedMessage);
        }

        return messageList;
    }

    /**
     * Translates a single proton-j message into a {@link ServiceBusReceivedMessage},
     * copying body, header, footer, properties, and all annotation maps. Unrecognized or
     * missing bodies are logged and replaced with an empty DATA body.
     */
    private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) {
        final Section body = amqpMessage.getBody();
        AmqpMessageBody amqpMessageBody;
        if (body != null) {
            if (body instanceof Data) {
                final Binary messageData = ((Data) body).getValue();
                amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray());
            } else if (body instanceof AmqpValue) {
                amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue());
            } else if (body instanceof AmqpSequence) {
                @SuppressWarnings("unchecked")
                List<Object> messageData = ((AmqpSequence) body).getValue();
                amqpMessageBody = AmqpMessageBody.fromSequence(messageData);
            } else {
                logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType()));
                amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY);
            }
        } else {
            logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null"));
            amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY);
        }

        final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody);
        AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage();

        ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties();
        if (applicationProperties != null) {
            // Described-type wrappers written by convertToDescribedType are unwrapped back
            // to their original values.
            final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue());
            brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue);
        }

        final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader();
        brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl()));
        brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount());
        brokeredHeader.setDurable(amqpMessage.getHeader().getDurable());
        brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer());
        brokeredHeader.setPriority(amqpMessage.getPriority());

        final Footer footer = amqpMessage.getFooter();
        if (footer != null && footer.getValue() != null) {
            @SuppressWarnings("unchecked")
            final Map<Symbol, Object> footerValue = footer.getValue();
            setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter());
        }

        final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties();
        brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId());
        final String replyTo = amqpMessage.getReplyTo();
        if (replyTo != null) {
            brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo()));
        }
        final Object messageId = amqpMessage.getMessageId();
        if (messageId != null) {
            brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString()));
        }

        brokeredProperties.setContentType(amqpMessage.getContentType());
        final Object correlationId = amqpMessage.getCorrelationId();
        if (correlationId != null) {
            brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString()));
        }

        final Properties amqpProperties = amqpMessage.getProperties();
        if (amqpProperties != null) {
            final String to = amqpProperties.getTo();
            if (to != null) {
                brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo()));
            }

            // Wire timestamps are java.util.Date; expose them as OffsetDateTime at UTC.
            if (amqpProperties.getAbsoluteExpiryTime() != null) {
                brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant()
                    .atOffset(ZoneOffset.UTC));
            }
            if (amqpProperties.getCreationTime() != null) {
                brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant()
                    .atOffset(ZoneOffset.UTC));
            }
        }

        brokeredProperties.setSubject(amqpMessage.getSubject());
        brokeredProperties.setGroupId(amqpMessage.getGroupId());
        brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding());
        brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence());
        brokeredProperties.setUserId(amqpMessage.getUserId());

        final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations();
        if (deliveryAnnotations != null) {
            setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations());
        }

        final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations();
        if (messageAnnotations != null) {
            setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations());
        }

        // Messages received over the receive link carry their lock token on a subtype.
        if (amqpMessage instanceof MessageWithLockToken) {
            brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken());
        }

        return brokeredMessage;
    }

    /**
     * Size, in bytes (estimated via {@code sizeof}), of the message's body section.
     * Returns 0 for null/absent or unrecognized body sections.
     */
    private static int getPayloadSize(Message msg) {
        if (msg == null || msg.getBody() == null) {
            return 0;
        }

        final Section bodySection = msg.getBody();
        if (bodySection instanceof AmqpValue) {
            return sizeof(((AmqpValue) bodySection).getValue());
        } else if (bodySection instanceof AmqpSequence) {
            return sizeof(((AmqpSequence) bodySection).getValue());
        } else if (bodySection instanceof Data) {
            final Data payloadSection = (Data) bodySection;
            final Binary payloadBytes = payloadSection.getValue();
            return sizeof(payloadBytes);
        } else {
            return 0;
        }
    }

    /**
     * Copies every entry of {@code sourceMap} into {@code targetMap}, converting Symbol keys
     * to their String form. No-op when {@code sourceMap} is null.
     */
    private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) {
        if (sourceMap != null) {
            for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) {
                targetMap.put(entry.getKey().toString(), entry.getValue());
            }
        }
    }

    // NOTE(review): dangling annotation before the closing brace — it annotates no member and
    // will not compile; appears to be residue from a removed sizeof(Object) method.
    @SuppressWarnings("rawtypes")
}
TODO(review): consider adding a size() method to ServiceBusDescribedType so that sizeof() can delegate described-type sizing to the type itself instead of special-casing each described value here.
private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; int size = ((Symbol) describedType.getDescriptor()).length() << 1; if (describedType.getDescribed() instanceof String) { size += ((String) describedType.getDescribed()).length(); } else if (describedType.getDescribed() instanceof Long) { size += Long.BYTES; } return size; } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += 
sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); }
if (obj instanceof Declare) {
private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; return describedType.size(); } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } 
amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { 
deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * For send application properties with OffsetDateTime, Duration and URI on the wire in amqp, * we need to convert these object to described type and then amqp can write these data into buffer. * @param propertiesValue application properties set by user which may contain specific type mentioned above. * @return Map only contains primitive type and described type. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { propertiesValue.put(entry.getKey(), new UriDescribedType(value)); } else if (value instanceof OffsetDateTime) { propertiesValue.put(entry.getKey(), new OffsetDateTimeDescribedType(value)); } else if (value instanceof Duration) { propertiesValue.put(entry.getKey(), new DurationDescribedType(value)); } } return propertiesValue; } /** * Reverse convert ServiceBusMessageSerializer * @param propertiesValue application properties from amqp message may contain described type. * @return Map without described type. 
*/ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { DescribedType describedType = (DescribedType) value; if (ServiceBusConstants.URI_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), describedToURI(describedType.getDescribed())); } else if (ServiceBusConstants.OFFSETDATETIME_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), describedToOffsetDateTime(describedType.getDescribed())); } else if (ServiceBusConstants.DURATION_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), describedToDuration(describedType.getDescribed())); } } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if 
(amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. 
Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { 
logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object 
correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection 
instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } 
amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { 
deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * Convert specific type to described type for sending on the wire. * @param propertiesValue application properties set by user which may contain specific type. * @return Map only contains primitive type and described type. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { entry.setValue(new UriDescribedType((URI) value)); } else if (value instanceof OffsetDateTime) { entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value)); } else if (value instanceof Duration) { entry.setValue(new DurationDescribedType((Duration) value)); } } return propertiesValue; } /** * Convert described type to origin type. * @param propertiesValue application properties from amqp message may contain described type. * @return Map without described type. 
*/ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { entry.setValue(MessageUtils.describedToOrigin((DescribedType) value)); } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return 
Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. 
Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); 
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { 
brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } 
@SuppressWarnings("rawtypes") }
Emit the shutdown signal after the CBS closing function returns, instead of waiting for the CBS node to be completely closed. As a result, the ReactorReceiver(s) can be closed in parallel with the CBS node.
/**
 * Disposes of this connection: emits the shutdown signal, closes the CBS node and all management
 * nodes concurrently, then schedules the reactor close work.
 *
 * The shutdown-signal emission, CBS close, and management-node close are composed with
 * {@code Mono.whenDelayError} so they are subscribed together (errors are deferred until all
 * complete); the reactor dispatcher is only closed after that, and the returned Mono completes
 * when {@code isClosedMono} completes.
 *
 * @param shutdownSignal Signal describing why the connection is being closed.
 * @return Mono that completes when the connection has fully closed.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Deferred emission: the signal is published when this Mono is subscribed (inside
    // whenDelayError below), not at closeAsync() call time.
    final Mono<Void> emitShutdownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);
        if (result.isFailure()) {
            // A failed emit is logged but does not abort the close sequence.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    // CBS channel may never have been created; close is a no-op in that case.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node; Mono.when waits for all of them.
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // Reactor close must run on the reactor dispatcher thread when one is available; fall back
    // to closing inline if scheduling fails or there is no dispatcher.
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                // Dispatcher is already shutting down; dispose on the current thread.
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError subscribes to all three concurrently, so the shutdown signal is emitted in
    // parallel with CBS/management close rather than after them.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")),
        emitShutdownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal. ")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
emitShutdownSignalOperation.doFinally(signalType ->
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes."))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat() .takeUntilOther(shutdownSignalSink.asMono()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(() -> !this.isDisposed()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
https://github.com/Azure/azure-sdk-for-java/pull/27146#issuecomment-1042782715
/**
 * Best-effort estimate of the encoded AMQP size, in bytes, of a single value.
 * Scalar types use fixed widths, Strings and Symbols are counted at two bytes per
 * character ({@code length << 1}), and container types (Map, Iterable, arrays)
 * recurse over their elements plus an 8-byte structural overhead.
 *
 * @param obj the value to measure; may be {@code null}.
 * @return the estimated size in bytes; 0 for {@code null}.
 * @throws IllegalArgumentException if the runtime type has no supported encoding.
 */
private static int sizeof(Object obj) {
    if (obj == null) {
        return 0;
    }
    // Text types: two bytes per character.
    if (obj instanceof String) {
        return obj.toString().length() << 1;
    }
    if (obj instanceof Symbol) {
        return ((Symbol) obj).length() << 1;
    }
    // Fixed-width scalars (signed and unsigned variants share a width).
    if (obj instanceof Byte || obj instanceof UnsignedByte) {
        return Byte.BYTES;
    }
    if (obj instanceof Integer || obj instanceof UnsignedInteger) {
        return Integer.BYTES;
    }
    if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) {
        return Long.BYTES;
    }
    if (obj instanceof Short || obj instanceof UnsignedShort) {
        return Short.BYTES;
    }
    if (obj instanceof Boolean) {
        return 1;
    }
    if (obj instanceof Character) {
        return 4;
    }
    if (obj instanceof Float) {
        return Float.BYTES;
    }
    if (obj instanceof Double) {
        return Double.BYTES;
    }
    if (obj instanceof UUID) {
        // Encoded as two 64-bit halves.
        return 16;
    }
    if (obj instanceof Decimal32) {
        return 4;
    }
    if (obj instanceof Decimal64) {
        return 8;
    }
    if (obj instanceof Decimal128) {
        return 16;
    }
    if (obj instanceof Binary) {
        return ((Binary) obj).getLength();
    }
    if (obj instanceof Declare) {
        return 7;
    }
    if (obj instanceof Discharge) {
        Discharge discharge = (Discharge) obj;
        return 12 + discharge.getTxnId().getLength();
    }
    if (obj instanceof ServiceBusDescribedType) {
        ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj;
        int size = ((Symbol) describedType.getDescriptor()).length() << 1;
        if (describedType.getDescribed() instanceof String) {
            // FIX: count two bytes per character, consistent with the String and
            // Symbol branches above (previously only one byte per char was counted,
            // under-estimating the described payload).
            size += ((String) describedType.getDescribed()).length() << 1;
        } else if (describedType.getDescribed() instanceof Long) {
            size += Long.BYTES;
        }
        return size;
    }
    if (obj instanceof Map) {
        // 8 bytes of structural overhead, then every key and value recursively.
        int size = 8;
        Map<?, ?> map = (Map<?, ?>) obj;
        for (Object key : map.keySet()) {
            size += sizeof(key);
        }
        for (Object value : map.values()) {
            size += sizeof(value);
        }
        return size;
    }
    if (obj instanceof Iterable) {
        int size = 8;
        for (Object innerObject : (Iterable<?>) obj) {
            size += sizeof(innerObject);
        }
        return size;
    }
    if (obj.getClass().isArray()) {
        int size = 8;
        int length = Array.getLength(obj);
        for (int i = 0; i < length; i++) {
            size += sizeof(Array.get(obj, i));
        }
        return size;
    }
    throw new IllegalArgumentException(String.format(Locale.US,
        "Encoding Type: %s is not supported", obj.getClass()));
}
}
private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; return describedType.size(); } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); }
// Serializer mapping azure-core ServiceBusMessage objects to/from proton-j AMQP Messages.
// getSize() produces a best-effort byte estimate (payload + message annotations +
// application properties); serialize()/deserializeMessage() copy each AMQP section
// (body, properties, header, annotations, footer) field by field.
// NOTE(review): the collapsed tail of this class carries a dangling
// @SuppressWarnings("rawtypes") immediately before the closing brace — an extraction
// artifact; all code below is kept byte-identical, comments only inserted where no
// string literal spans the original line break.
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { amqpMessage.setApplicationProperties(new ApplicationProperties(convertToDescribedType(brokeredMessage.getApplicationProperties()))); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); 
// serialize() continues: copy the remaining addressing/user-id/timestamp properties,
// footer, header fields, message annotations (scheduled enqueue time, partition key)
// and delivery annotations (re-keyed from String to Symbol).
amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } 
// End of serialize(); next: the described-type converters (wrapping URI/OffsetDateTime/
// Duration application-property values for the wire and back) and deserialize().
// NOTE(review): convertToOriginType rebuilds OffsetDateTime with ZoneId.systemDefault()
// although serialization normalized to UTC — confirm the intended zone on round-trip.
// NOTE(review): deserializeList (below) repeats the `clazz == OffsetDateTime.class`
// branch twice; the second branch is unreachable dead code.
// NOTE(review): these converters mutate the caller-supplied map in place while
// iterating (entry replacement via put on existing keys) — intentional, but fragile.
amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } private Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.URI_SYMBOL, ((URI) value).toString())); } else if (value instanceof OffsetDateTime) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.OFFSETDATETIME_SYMBOL, ((OffsetDateTime) value).atZoneSameInstant(ZoneOffset.UTC).toInstant().toEpochMilli())); } else if (value instanceof Duration) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.DURATION_SYMBOL, ((Duration) value).toNanos())); } } return propertiesValue; } private Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { DescribedType describedType = (DescribedType) value; if (ServiceBusConstants.URI_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), URI.create((String) describedType.getDescribed())); } else if (ServiceBusConstants.OFFSETDATETIME_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), OffsetDateTime.ofInstant(Instant.ofEpochMilli((long) describedType.getDescribed()), ZoneId.systemDefault())); } else if (ServiceBusConstants.DURATION_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), Duration.ofNanos((long) describedType.getDescribed())); } } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' 
cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> 
date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. 
Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = applicationProperties.getValue(); 
// deserializeMessage() continues: application properties are converted back from
// described types, then header/properties/annotations are mapped onto the message,
// and the lock token is recovered when the transport supplied one.
brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(convertToOriginType(propertiesValue)); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { 
// Creation timestamp is normalized to a UTC offset, mirroring the expiry handling above.
brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") }
// Updated revision of the ServiceBusMessage <-> proton-j Message serializer. Compared
// with the earlier revision in this file, application-property conversion now delegates
// to dedicated described-type wrappers (UriDescribedType, OffsetDateTimeDescribedType,
// DurationDescribedType) and MessageUtils.describedToOrigin for the reverse mapping.
// NOTE(review): deserializeList below still carries a duplicated (dead)
// `clazz == OffsetDateTime.class` branch.
// NOTE(review): this class's closing brace lies beyond this chunk; lines kept
// byte-identical, comments inserted only where no string literal spans a line break.
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } 
// serialize() continues: copy addressing/user-id/timestamp properties, footer, header
// fields, message annotations (scheduled enqueue time, partition key) and delivery
// annotations onto the proton-j message.
amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { 
// Each delivery-annotation entry is re-keyed from String to a proton-j Symbol.
deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * Convert specific type to described type for sending on the wire. * @param propertiesValue application properties set by user which may contain specific type. * @return Map only contains primitive type and described type. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { entry.setValue(new UriDescribedType((URI) value)); } else if (value instanceof OffsetDateTime) { entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value)); } else if (value instanceof Duration) { entry.setValue(new DurationDescribedType((Duration) value)); } } return propertiesValue; } /** * Convert described type to origin type. * @param propertiesValue application properties from amqp message may contain described type. * @return Map without described type. 
*/ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { entry.setValue(MessageUtils.describedToOrigin((DescribedType) value)); } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return 
Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. 
Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); 
// deserializeMessage() continues: application properties are converted back from
// described types, then header/properties/annotations are mapped onto the brokered
// message and the lock token recovered when the transport supplied one.
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { 
// Expiry and creation timestamps are normalized to UTC offsets.
brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } 
@SuppressWarnings("rawtypes") }
Also add a test with a character that encodes to two or more bytes in UTF-8.
void serializeMessageWithSpecificApplicationProperties() { String contents = "some contents"; String messageId = "messageId"; final ServiceBusMessage message = getServiceBusMessage(contents, messageId); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("duration", Duration.ZERO); specificMap.put("offsetDateTime", OffsetDateTime.now()); message.getApplicationProperties().putAll(specificMap); Message amqpMessage = serializer.serialize(message); assertEquals(specificMap.size(), amqpMessage.getApplicationProperties().getValue().size()); AtomicInteger convertCount = new AtomicInteger(); specificMap.forEach((key, value) -> { Assertions.assertTrue(amqpMessage.getApplicationProperties().getValue().containsKey(key)); if (value instanceof URI) { assertEquals(((URI) value).toString(), ((ServiceBusDescribedType) amqpMessage.getApplicationProperties().getValue().get(key)).getDescribed()); convertCount.getAndIncrement(); } else if (value instanceof Duration) { convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); }
String contents = "some contents"; // arbitrary payload used as the message body in this test
void serializeMessageWithSpecificApplicationProperties() { String contents = "some contents"; String messageId = "messageId"; final ServiceBusMessage message = getServiceBusMessage(contents, messageId); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("duration", Duration.ZERO); specificMap.put("offsetDateTime", OffsetDateTime.now()); message.getApplicationProperties().putAll(specificMap); Message amqpMessage = serializer.serialize(message); assertEquals(specificMap.size(), amqpMessage.getApplicationProperties().getValue().size()); AtomicInteger convertCount = new AtomicInteger(); specificMap.forEach((key, value) -> { Assertions.assertTrue(amqpMessage.getApplicationProperties().getValue().containsKey(key)); if (value instanceof URI) { assertEquals(((URI) value).toString(), ((ServiceBusDescribedType) amqpMessage.getApplicationProperties().getValue().get(key)).getDescribed()); convertCount.getAndIncrement(); } else if (value instanceof Duration) { convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); }
class ServiceBusMessageSerializerTest { private final ServiceBusMessageSerializer serializer = new ServiceBusMessageSerializer(); @Test void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, ServiceBusMessage.class)); } @Test void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type Message. */ @Test void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test void cannotDeserializeObject() { final org.apache.qpid.proton.message.Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, ServiceBusReceiverAsyncClient.class)); assertThrows(IllegalArgumentException.class, () -> serializer.deserializeList(message, ServiceBusReceiverAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link ServiceBusMessage}. 
*/ @Test void deserializeMessage() { final String payload = "hello-world"; final byte[] payloadBytes = payload.getBytes(UTF_8); final org.apache.qpid.proton.message.Message message = getMessage(payloadBytes); message.setAddress("a-to-address"); message.setContentType("some-content-type"); message.setCorrelationId("correlation-id-test"); message.setDeliveryCount(10); message.setTtl(1045); message.setMessageId("a-test-message-id"); message.setSubject("this is a label"); message.getProperties().setTo("this is a to property"); message.setReplyTo("reply-to-property"); message.setReplyToGroupId("reply-to-session-id-property"); message.setGroupId("session-id-as-a-group-id"); Map<Symbol, Object> expectedMessageAnnotations = message.getMessageAnnotations().getValue(); expectedMessageAnnotations.put(Symbol.valueOf("A"), "A value"); Map<Symbol, Object> expectedDeliveryAnnotations = new HashMap<>(); expectedDeliveryAnnotations.put(Symbol.valueOf("D"), "D value"); message.setDeliveryAnnotations(new DeliveryAnnotations(expectedDeliveryAnnotations)); Map<Symbol, Object> expectedFooterValues = new HashMap<>(); expectedFooterValues.put(Symbol.valueOf("footer1"), "footer value"); message.setFooter(new Footer(expectedFooterValues)); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(message, ServiceBusReceivedMessage.class); assertNotNull(actualMessage.getEnqueuedTime()); assertEquals(SEQUENCE_NUMBER, actualMessage.getSequenceNumber()); assertEquals(message.getTtl(), actualMessage.getTimeToLive().toMillis()); assertEquals(message.getSubject(), actualMessage.getSubject()); assertEquals(message.getReplyTo(), actualMessage.getReplyTo()); assertEquals(message.getDeliveryCount(), actualMessage.getDeliveryCount()); assertEquals(message.getProperties().getTo(), actualMessage.getTo()); assertEquals(message.getReplyToGroupId(), actualMessage.getReplyToSessionId()); assertEquals(message.getGroupId(), actualMessage.getSessionId()); assertEquals(message.getContentType(), 
actualMessage.getContentType()); assertEquals(message.getCorrelationId(), actualMessage.getCorrelationId()); assertValues(expectedMessageAnnotations, actualMessage.getRawAmqpMessage().getMessageAnnotations()); assertValues(expectedDeliveryAnnotations, actualMessage.getRawAmqpMessage().getDeliveryAnnotations()); assertValues(expectedFooterValues, actualMessage.getRawAmqpMessage().getFooter()); assertEquals(APPLICATION_PROPERTIES.size(), actualMessage.getApplicationProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); assertEquals(value, actualMessage.getApplicationProperties().get(key)); }); assertEquals(payload, actualMessage.getBody().toString()); } /** * Verifies that we can serialize OffsetDateTime, Duration and URI in application properties. */ @Test /** * Message with specific type send from .net SDK. * * ServiceBusMessage message = new ServiceBusMessage("Hello world!"); * DateTime utcTime1 = DateTime.Parse("2022-02-24T08:23:23.443127200Z"); * utcTime1 = DateTime.SpecifyKind(utcTime1, DateTimeKind.Utc); * message.ApplicationProperties.Add("time", utcTime2); * message.ApplicationProperties.Add("span", TimeSpan.FromSeconds(10)); * message.ApplicationProperties.Add("uri", new Uri("https: */ @Test void deserializeRealMessageFromByte() { byte[] data = new byte[] { 0, 83, 112, -64, 10, 5, 64, 64, 112, 72, 25, 8, 0, 64, 67, 0, 83, 113, -63, 36, 2, -93, 16, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 45, 116, 111, 107, 101, 110, -104, -99, -119, 88, -41, -124, -37, 69, 10, -98, -95, -99, 119, -64, -61, 36, 90, 0, 83, 114, -63, 85, 6, -93, 19, 120, 45, 111, 112, 116, 45, 101, 110, 113, 117, 101, 117, 101, 100, 45, 116, 105, 109, 101, -125, 0, 0, 1, 127, 42, -30, 45, 43, -93, 21, 120, 45, 111, 112, 116, 45, 115, 101, 113, 117, 101, 110, 99, 101, 45, 110, 117, 109, 98, 101, 114, 85, 78, -93, 18, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 101, 100, 45, 117, 110, 116, 
105, 108, -125, 0, 0, 1, 127, 42, -30, -94, 106, 0, 83, 115, -64, 63, 13, -95, 32, 53, 98, 100, 50, 56, 100, 98, 97, 48, 56, 54, 99, 52, 98, 57, 99, 98, 55, 55, 49, 99, 100, 97, 97, 101, 102, 52, 51, 102, 102, 49, 98, 64, 64, 64, 64, 64, 64, 64, -125, 0, 0, 1, 127, 114, -5, 53, 43, -125, 0, 0, 1, 127, 42, -30, 45, 43, 64, 64, 64, 0, 83, 116, -63, -118, 6, -95, 4, 116, 105, 109, 101, 0, -93, 29, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 100, 97, 116, 101, 116, 105, 109, 101, 45, 111, 102, 102, 115, 101, 116, -127, 8, -39, -9, -79, -6, -116, -83, 40, -95, 4, 115, 112, 97, 110, 0, -93, 22, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 116, 105, 109, 101, 115, 112, 97, 110, -127, 0, 0, 0, 0, 5, -11, -31, 0, -95, 3, 117, 114, 105, 0, -93, 17, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 117, 114, 105, -95, 23, 104, 116, 116, 112, 115, 58, 47, 47, 119, 119, 119, 46, 103, 105, 116, 104, 117, 98, 46, 99, 111, 109, 47, 0, 83, 117, -96, 12, 72, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 33 }; final Message amqpMessage = Proton.message(); amqpMessage.decode(data, 0, data.length); amqpMessage.setHeader(new Header()); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(amqpMessage, ServiceBusReceivedMessage.class); AtomicInteger convertCount = new AtomicInteger(); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("span", Duration.ofSeconds(2)); specificMap.put("time", OffsetDateTime.parse("2022-02-24T08:23:23.443127200Z")); assertEquals(specificMap.size(), actualMessage.getApplicationProperties().size()); specificMap.forEach((key, value) -> { Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); if (value instanceof URI) { assertEquals((URI) value, actualMessage.getApplicationProperties().get(key)); convertCount.getAndIncrement(); } else if (value instanceof Duration) { assertEquals((Duration) value, 
specificMap.get("span")); convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { assertEquals((OffsetDateTime) value, specificMap.get("time")); convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); } /** * Verifies that an empty collection is returned if the status code was not {@link AmqpResponseCode */ @Test void deserializeListMessagesNotOK() { final Map<String, Object> properties = new HashMap<>(); properties.put("status-code", AmqpResponseCode.FORBIDDEN.getValue()); final Message message = Proton.message(); message.setBody(new AmqpValue("test")); message.setApplicationProperties(new ApplicationProperties(properties)); final List<ServiceBusReceivedMessage> actual = serializer.deserializeList(message, ServiceBusReceivedMessage.class); Assertions.assertNotNull(actual); Assertions.assertTrue(actual.isEmpty()); } private void assertValues(Map<Symbol, Object> expected, Map<String, Object> actual) { assertEquals(expected.size(), actual.size()); for (Map.Entry<Symbol, Object> expectedEntry : expected.entrySet()) { assertEquals(expectedEntry.getValue(), actual.get(expectedEntry.getKey().toString())); } } }
class ServiceBusMessageSerializerTest { private final ServiceBusMessageSerializer serializer = new ServiceBusMessageSerializer(); @Test void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, ServiceBusMessage.class)); } @Test void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type Message. */ @Test void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test void cannotDeserializeObject() { final org.apache.qpid.proton.message.Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, ServiceBusReceiverAsyncClient.class)); assertThrows(IllegalArgumentException.class, () -> serializer.deserializeList(message, ServiceBusReceiverAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link ServiceBusMessage}. 
*/ @Test void deserializeMessage() { final String payload = "hello-world"; final byte[] payloadBytes = payload.getBytes(UTF_8); final org.apache.qpid.proton.message.Message message = getMessage(payloadBytes); message.setAddress("a-to-address"); message.setContentType("some-content-type"); message.setCorrelationId("correlation-id-test"); message.setDeliveryCount(10); message.setTtl(1045); message.setMessageId("a-test-message-id"); message.setSubject("this is a label"); message.getProperties().setTo("this is a to property"); message.setReplyTo("reply-to-property"); message.setReplyToGroupId("reply-to-session-id-property"); message.setGroupId("session-id-as-a-group-id"); Map<Symbol, Object> expectedMessageAnnotations = message.getMessageAnnotations().getValue(); expectedMessageAnnotations.put(Symbol.valueOf("A"), "A value"); Map<Symbol, Object> expectedDeliveryAnnotations = new HashMap<>(); expectedDeliveryAnnotations.put(Symbol.valueOf("D"), "D value"); message.setDeliveryAnnotations(new DeliveryAnnotations(expectedDeliveryAnnotations)); Map<Symbol, Object> expectedFooterValues = new HashMap<>(); expectedFooterValues.put(Symbol.valueOf("footer1"), "footer value"); message.setFooter(new Footer(expectedFooterValues)); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(message, ServiceBusReceivedMessage.class); assertNotNull(actualMessage.getEnqueuedTime()); assertEquals(SEQUENCE_NUMBER, actualMessage.getSequenceNumber()); assertEquals(message.getTtl(), actualMessage.getTimeToLive().toMillis()); assertEquals(message.getSubject(), actualMessage.getSubject()); assertEquals(message.getReplyTo(), actualMessage.getReplyTo()); assertEquals(message.getDeliveryCount(), actualMessage.getDeliveryCount()); assertEquals(message.getProperties().getTo(), actualMessage.getTo()); assertEquals(message.getReplyToGroupId(), actualMessage.getReplyToSessionId()); assertEquals(message.getGroupId(), actualMessage.getSessionId()); assertEquals(message.getContentType(), 
actualMessage.getContentType()); assertEquals(message.getCorrelationId(), actualMessage.getCorrelationId()); assertValues(expectedMessageAnnotations, actualMessage.getRawAmqpMessage().getMessageAnnotations()); assertValues(expectedDeliveryAnnotations, actualMessage.getRawAmqpMessage().getDeliveryAnnotations()); assertValues(expectedFooterValues, actualMessage.getRawAmqpMessage().getFooter()); assertEquals(APPLICATION_PROPERTIES.size(), actualMessage.getApplicationProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); assertEquals(value, actualMessage.getApplicationProperties().get(key)); }); assertEquals(payload, actualMessage.getBody().toString()); } /** * Verifies that we can serialize OffsetDateTime, Duration and URI in application properties. */ @Test /** * Message with specific type send from .net SDK. * * ServiceBusMessage message = new ServiceBusMessage("Hello world!"); * DateTime utcTime1 = DateTime.Parse("2022-02-24T08:23:23.443127200Z"); * utcTime1 = DateTime.SpecifyKind(utcTime1, DateTimeKind.Utc); * message.ApplicationProperties.Add("time", utcTime2); * message.ApplicationProperties.Add("span", TimeSpan.FromSeconds(10)); * message.ApplicationProperties.Add("uri", new Uri("https: */ @Test void deserializeRealMessageFromByte() { byte[] data = new byte[] { 0, 83, 112, -64, 10, 5, 64, 64, 112, 72, 25, 8, 0, 64, 67, 0, 83, 113, -63, 36, 2, -93, 16, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 45, 116, 111, 107, 101, 110, -104, -99, -119, 88, -41, -124, -37, 69, 10, -98, -95, -99, 119, -64, -61, 36, 90, 0, 83, 114, -63, 85, 6, -93, 19, 120, 45, 111, 112, 116, 45, 101, 110, 113, 117, 101, 117, 101, 100, 45, 116, 105, 109, 101, -125, 0, 0, 1, 127, 42, -30, 45, 43, -93, 21, 120, 45, 111, 112, 116, 45, 115, 101, 113, 117, 101, 110, 99, 101, 45, 110, 117, 109, 98, 101, 114, 85, 78, -93, 18, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 101, 100, 45, 117, 110, 116, 
105, 108, -125, 0, 0, 1, 127, 42, -30, -94, 106, 0, 83, 115, -64, 63, 13, -95, 32, 53, 98, 100, 50, 56, 100, 98, 97, 48, 56, 54, 99, 52, 98, 57, 99, 98, 55, 55, 49, 99, 100, 97, 97, 101, 102, 52, 51, 102, 102, 49, 98, 64, 64, 64, 64, 64, 64, 64, -125, 0, 0, 1, 127, 114, -5, 53, 43, -125, 0, 0, 1, 127, 42, -30, 45, 43, 64, 64, 64, 0, 83, 116, -63, -118, 6, -95, 4, 116, 105, 109, 101, 0, -93, 29, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 100, 97, 116, 101, 116, 105, 109, 101, 45, 111, 102, 102, 115, 101, 116, -127, 8, -39, -9, -79, -6, -116, -83, 40, -95, 4, 115, 112, 97, 110, 0, -93, 22, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 116, 105, 109, 101, 115, 112, 97, 110, -127, 0, 0, 0, 0, 5, -11, -31, 0, -95, 3, 117, 114, 105, 0, -93, 17, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 117, 114, 105, -95, 23, 104, 116, 116, 112, 115, 58, 47, 47, 119, 119, 119, 46, 103, 105, 116, 104, 117, 98, 46, 99, 111, 109, 47, 0, 83, 117, -96, 12, 72, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 33 }; final Message amqpMessage = Proton.message(); amqpMessage.decode(data, 0, data.length); amqpMessage.setHeader(new Header()); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(amqpMessage, ServiceBusReceivedMessage.class); AtomicInteger convertCount = new AtomicInteger(); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("span", Duration.ofSeconds(2)); specificMap.put("time", OffsetDateTime.parse("2022-02-24T08:23:23.443127200Z")); assertEquals(specificMap.size(), actualMessage.getApplicationProperties().size()); specificMap.forEach((key, value) -> { Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); if (value instanceof URI) { assertEquals((URI) value, actualMessage.getApplicationProperties().get(key)); convertCount.getAndIncrement(); } else if (value instanceof Duration) { assertEquals((Duration) value, 
specificMap.get("span")); convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { assertEquals((OffsetDateTime) value, specificMap.get("time")); convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); } /** * Verifies that an empty collection is returned if the status code was not {@link AmqpResponseCode */ @Test void deserializeListMessagesNotOK() { final Map<String, Object> properties = new HashMap<>(); properties.put("status-code", AmqpResponseCode.FORBIDDEN.getValue()); final Message message = Proton.message(); message.setBody(new AmqpValue("test")); message.setApplicationProperties(new ApplicationProperties(properties)); final List<ServiceBusReceivedMessage> actual = serializer.deserializeList(message, ServiceBusReceivedMessage.class); Assertions.assertNotNull(actual); Assertions.assertTrue(actual.isEmpty()); } private void assertValues(Map<Symbol, Object> expected, Map<String, Object> actual) { assertEquals(expected.size(), actual.size()); for (Map.Entry<Symbol, Object> expectedEntry : expected.entrySet()) { assertEquals(expectedEntry.getValue(), actual.get(expectedEntry.getKey().toString())); } } }
This part does not look correct: `((String) this.getDescribed()).length()` returns the UTF-16 char count, not the encoded byte length — a character that needs two or more bytes in UTF-8 is still counted once. I am not sure which of the two semantics is required here; whichever it is, please document it in the superclass.
/**
 * Gets the encoded size, in bytes, of this described type: the descriptor symbol
 * length plus the UTF-8 encoded byte length of the wrapped URI string.
 * <p>
 * {@code String.length()} must not be used for the payload here: it counts UTF-16
 * chars, which under-counts any character that needs two or more bytes in UTF-8.
 * (URI_SYMBOL is ASCII, so its char count equals its byte count.)
 */
public int size() {
    return URI_SYMBOL.length()
        + ((String) this.getDescribed()).getBytes(java.nio.charset.StandardCharsets.UTF_8).length;
}
return URI_SYMBOL.length() + ((String) this.getDescribed()).length();
/**
 * Gets the encoded size, in bytes, of this described type: the descriptor symbol
 * length plus the UTF-8 encoded byte length of the wrapped URI string (not the
 * char count, which differs for characters needing two or more bytes in UTF-8).
 */
public int size() { return URI_SYMBOL.length() + ((String) this.getDescribed()).getBytes(StandardCharsets.UTF_8).length; }
class UriDescribedType extends ServiceBusDescribedType { /** * Set described to describe data in described type. * * @param described real value in the described type. */ public UriDescribedType(Object described) { super(URI_SYMBOL, ((URI) described).toString()); } @Override }
class UriDescribedType extends ServiceBusDescribedType { /** * Set described to describe data in described type. * * @param uri set as described in DescribedType. */ public UriDescribedType(URI uri) { super(URI_SYMBOL, uri.toString()); } @Override }
I didn't fix the `sizeOf` method in this PR, so I kept the test content identical to the former code.
void serializeMessageWithSpecificApplicationProperties() { String contents = "some contents"; String messageId = "messageId"; final ServiceBusMessage message = getServiceBusMessage(contents, messageId); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("duration", Duration.ZERO); specificMap.put("offsetDateTime", OffsetDateTime.now()); message.getApplicationProperties().putAll(specificMap); Message amqpMessage = serializer.serialize(message); assertEquals(specificMap.size(), amqpMessage.getApplicationProperties().getValue().size()); AtomicInteger convertCount = new AtomicInteger(); specificMap.forEach((key, value) -> { Assertions.assertTrue(amqpMessage.getApplicationProperties().getValue().containsKey(key)); if (value instanceof URI) { assertEquals(((URI) value).toString(), ((ServiceBusDescribedType) amqpMessage.getApplicationProperties().getValue().get(key)).getDescribed()); convertCount.getAndIncrement(); } else if (value instanceof Duration) { convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); }
String contents = "some contents"; // arbitrary payload used as the message body in this test
void serializeMessageWithSpecificApplicationProperties() { String contents = "some contents"; String messageId = "messageId"; final ServiceBusMessage message = getServiceBusMessage(contents, messageId); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("duration", Duration.ZERO); specificMap.put("offsetDateTime", OffsetDateTime.now()); message.getApplicationProperties().putAll(specificMap); Message amqpMessage = serializer.serialize(message); assertEquals(specificMap.size(), amqpMessage.getApplicationProperties().getValue().size()); AtomicInteger convertCount = new AtomicInteger(); specificMap.forEach((key, value) -> { Assertions.assertTrue(amqpMessage.getApplicationProperties().getValue().containsKey(key)); if (value instanceof URI) { assertEquals(((URI) value).toString(), ((ServiceBusDescribedType) amqpMessage.getApplicationProperties().getValue().get(key)).getDescribed()); convertCount.getAndIncrement(); } else if (value instanceof Duration) { convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); }
class ServiceBusMessageSerializerTest { private final ServiceBusMessageSerializer serializer = new ServiceBusMessageSerializer(); @Test void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, ServiceBusMessage.class)); } @Test void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type Message. */ @Test void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test void cannotDeserializeObject() { final org.apache.qpid.proton.message.Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, ServiceBusReceiverAsyncClient.class)); assertThrows(IllegalArgumentException.class, () -> serializer.deserializeList(message, ServiceBusReceiverAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link ServiceBusMessage}. 
*/ @Test void deserializeMessage() { final String payload = "hello-world"; final byte[] payloadBytes = payload.getBytes(UTF_8); final org.apache.qpid.proton.message.Message message = getMessage(payloadBytes); message.setAddress("a-to-address"); message.setContentType("some-content-type"); message.setCorrelationId("correlation-id-test"); message.setDeliveryCount(10); message.setTtl(1045); message.setMessageId("a-test-message-id"); message.setSubject("this is a label"); message.getProperties().setTo("this is a to property"); message.setReplyTo("reply-to-property"); message.setReplyToGroupId("reply-to-session-id-property"); message.setGroupId("session-id-as-a-group-id"); Map<Symbol, Object> expectedMessageAnnotations = message.getMessageAnnotations().getValue(); expectedMessageAnnotations.put(Symbol.valueOf("A"), "A value"); Map<Symbol, Object> expectedDeliveryAnnotations = new HashMap<>(); expectedDeliveryAnnotations.put(Symbol.valueOf("D"), "D value"); message.setDeliveryAnnotations(new DeliveryAnnotations(expectedDeliveryAnnotations)); Map<Symbol, Object> expectedFooterValues = new HashMap<>(); expectedFooterValues.put(Symbol.valueOf("footer1"), "footer value"); message.setFooter(new Footer(expectedFooterValues)); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(message, ServiceBusReceivedMessage.class); assertNotNull(actualMessage.getEnqueuedTime()); assertEquals(SEQUENCE_NUMBER, actualMessage.getSequenceNumber()); assertEquals(message.getTtl(), actualMessage.getTimeToLive().toMillis()); assertEquals(message.getSubject(), actualMessage.getSubject()); assertEquals(message.getReplyTo(), actualMessage.getReplyTo()); assertEquals(message.getDeliveryCount(), actualMessage.getDeliveryCount()); assertEquals(message.getProperties().getTo(), actualMessage.getTo()); assertEquals(message.getReplyToGroupId(), actualMessage.getReplyToSessionId()); assertEquals(message.getGroupId(), actualMessage.getSessionId()); assertEquals(message.getContentType(), 
actualMessage.getContentType()); assertEquals(message.getCorrelationId(), actualMessage.getCorrelationId()); assertValues(expectedMessageAnnotations, actualMessage.getRawAmqpMessage().getMessageAnnotations()); assertValues(expectedDeliveryAnnotations, actualMessage.getRawAmqpMessage().getDeliveryAnnotations()); assertValues(expectedFooterValues, actualMessage.getRawAmqpMessage().getFooter()); assertEquals(APPLICATION_PROPERTIES.size(), actualMessage.getApplicationProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); assertEquals(value, actualMessage.getApplicationProperties().get(key)); }); assertEquals(payload, actualMessage.getBody().toString()); } /** * Verifies that we can serialize OffsetDateTime, Duration and URI in application properties. */ @Test /** * Message with specific type send from .net SDK. * * ServiceBusMessage message = new ServiceBusMessage("Hello world!"); * DateTime utcTime1 = DateTime.Parse("2022-02-24T08:23:23.443127200Z"); * utcTime1 = DateTime.SpecifyKind(utcTime1, DateTimeKind.Utc); * message.ApplicationProperties.Add("time", utcTime2); * message.ApplicationProperties.Add("span", TimeSpan.FromSeconds(10)); * message.ApplicationProperties.Add("uri", new Uri("https: */ @Test void deserializeRealMessageFromByte() { byte[] data = new byte[] { 0, 83, 112, -64, 10, 5, 64, 64, 112, 72, 25, 8, 0, 64, 67, 0, 83, 113, -63, 36, 2, -93, 16, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 45, 116, 111, 107, 101, 110, -104, -99, -119, 88, -41, -124, -37, 69, 10, -98, -95, -99, 119, -64, -61, 36, 90, 0, 83, 114, -63, 85, 6, -93, 19, 120, 45, 111, 112, 116, 45, 101, 110, 113, 117, 101, 117, 101, 100, 45, 116, 105, 109, 101, -125, 0, 0, 1, 127, 42, -30, 45, 43, -93, 21, 120, 45, 111, 112, 116, 45, 115, 101, 113, 117, 101, 110, 99, 101, 45, 110, 117, 109, 98, 101, 114, 85, 78, -93, 18, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 101, 100, 45, 117, 110, 116, 
105, 108, -125, 0, 0, 1, 127, 42, -30, -94, 106, 0, 83, 115, -64, 63, 13, -95, 32, 53, 98, 100, 50, 56, 100, 98, 97, 48, 56, 54, 99, 52, 98, 57, 99, 98, 55, 55, 49, 99, 100, 97, 97, 101, 102, 52, 51, 102, 102, 49, 98, 64, 64, 64, 64, 64, 64, 64, -125, 0, 0, 1, 127, 114, -5, 53, 43, -125, 0, 0, 1, 127, 42, -30, 45, 43, 64, 64, 64, 0, 83, 116, -63, -118, 6, -95, 4, 116, 105, 109, 101, 0, -93, 29, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 100, 97, 116, 101, 116, 105, 109, 101, 45, 111, 102, 102, 115, 101, 116, -127, 8, -39, -9, -79, -6, -116, -83, 40, -95, 4, 115, 112, 97, 110, 0, -93, 22, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 116, 105, 109, 101, 115, 112, 97, 110, -127, 0, 0, 0, 0, 5, -11, -31, 0, -95, 3, 117, 114, 105, 0, -93, 17, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 117, 114, 105, -95, 23, 104, 116, 116, 112, 115, 58, 47, 47, 119, 119, 119, 46, 103, 105, 116, 104, 117, 98, 46, 99, 111, 109, 47, 0, 83, 117, -96, 12, 72, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 33 }; final Message amqpMessage = Proton.message(); amqpMessage.decode(data, 0, data.length); amqpMessage.setHeader(new Header()); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(amqpMessage, ServiceBusReceivedMessage.class); AtomicInteger convertCount = new AtomicInteger(); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("span", Duration.ofSeconds(2)); specificMap.put("time", OffsetDateTime.parse("2022-02-24T08:23:23.443127200Z")); assertEquals(specificMap.size(), actualMessage.getApplicationProperties().size()); specificMap.forEach((key, value) -> { Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); if (value instanceof URI) { assertEquals((URI) value, actualMessage.getApplicationProperties().get(key)); convertCount.getAndIncrement(); } else if (value instanceof Duration) { assertEquals((Duration) value, 
specificMap.get("span")); convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { assertEquals((OffsetDateTime) value, specificMap.get("time")); convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); } /** * Verifies that an empty collection is returned if the status code was not {@link AmqpResponseCode */ @Test void deserializeListMessagesNotOK() { final Map<String, Object> properties = new HashMap<>(); properties.put("status-code", AmqpResponseCode.FORBIDDEN.getValue()); final Message message = Proton.message(); message.setBody(new AmqpValue("test")); message.setApplicationProperties(new ApplicationProperties(properties)); final List<ServiceBusReceivedMessage> actual = serializer.deserializeList(message, ServiceBusReceivedMessage.class); Assertions.assertNotNull(actual); Assertions.assertTrue(actual.isEmpty()); } private void assertValues(Map<Symbol, Object> expected, Map<String, Object> actual) { assertEquals(expected.size(), actual.size()); for (Map.Entry<Symbol, Object> expectedEntry : expected.entrySet()) { assertEquals(expectedEntry.getValue(), actual.get(expectedEntry.getKey().toString())); } } }
class ServiceBusMessageSerializerTest { private final ServiceBusMessageSerializer serializer = new ServiceBusMessageSerializer(); @Test void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, ServiceBusMessage.class)); } @Test void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type Message. */ @Test void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test void cannotDeserializeObject() { final org.apache.qpid.proton.message.Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, ServiceBusReceiverAsyncClient.class)); assertThrows(IllegalArgumentException.class, () -> serializer.deserializeList(message, ServiceBusReceiverAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link ServiceBusMessage}. 
*/ @Test void deserializeMessage() { final String payload = "hello-world"; final byte[] payloadBytes = payload.getBytes(UTF_8); final org.apache.qpid.proton.message.Message message = getMessage(payloadBytes); message.setAddress("a-to-address"); message.setContentType("some-content-type"); message.setCorrelationId("correlation-id-test"); message.setDeliveryCount(10); message.setTtl(1045); message.setMessageId("a-test-message-id"); message.setSubject("this is a label"); message.getProperties().setTo("this is a to property"); message.setReplyTo("reply-to-property"); message.setReplyToGroupId("reply-to-session-id-property"); message.setGroupId("session-id-as-a-group-id"); Map<Symbol, Object> expectedMessageAnnotations = message.getMessageAnnotations().getValue(); expectedMessageAnnotations.put(Symbol.valueOf("A"), "A value"); Map<Symbol, Object> expectedDeliveryAnnotations = new HashMap<>(); expectedDeliveryAnnotations.put(Symbol.valueOf("D"), "D value"); message.setDeliveryAnnotations(new DeliveryAnnotations(expectedDeliveryAnnotations)); Map<Symbol, Object> expectedFooterValues = new HashMap<>(); expectedFooterValues.put(Symbol.valueOf("footer1"), "footer value"); message.setFooter(new Footer(expectedFooterValues)); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(message, ServiceBusReceivedMessage.class); assertNotNull(actualMessage.getEnqueuedTime()); assertEquals(SEQUENCE_NUMBER, actualMessage.getSequenceNumber()); assertEquals(message.getTtl(), actualMessage.getTimeToLive().toMillis()); assertEquals(message.getSubject(), actualMessage.getSubject()); assertEquals(message.getReplyTo(), actualMessage.getReplyTo()); assertEquals(message.getDeliveryCount(), actualMessage.getDeliveryCount()); assertEquals(message.getProperties().getTo(), actualMessage.getTo()); assertEquals(message.getReplyToGroupId(), actualMessage.getReplyToSessionId()); assertEquals(message.getGroupId(), actualMessage.getSessionId()); assertEquals(message.getContentType(), 
actualMessage.getContentType()); assertEquals(message.getCorrelationId(), actualMessage.getCorrelationId()); assertValues(expectedMessageAnnotations, actualMessage.getRawAmqpMessage().getMessageAnnotations()); assertValues(expectedDeliveryAnnotations, actualMessage.getRawAmqpMessage().getDeliveryAnnotations()); assertValues(expectedFooterValues, actualMessage.getRawAmqpMessage().getFooter()); assertEquals(APPLICATION_PROPERTIES.size(), actualMessage.getApplicationProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); assertEquals(value, actualMessage.getApplicationProperties().get(key)); }); assertEquals(payload, actualMessage.getBody().toString()); } /** * Verifies that we can serialize OffsetDateTime, Duration and URI in application properties. */ @Test /** * Message with specific type send from .net SDK. * * ServiceBusMessage message = new ServiceBusMessage("Hello world!"); * DateTime utcTime1 = DateTime.Parse("2022-02-24T08:23:23.443127200Z"); * utcTime1 = DateTime.SpecifyKind(utcTime1, DateTimeKind.Utc); * message.ApplicationProperties.Add("time", utcTime2); * message.ApplicationProperties.Add("span", TimeSpan.FromSeconds(10)); * message.ApplicationProperties.Add("uri", new Uri("https: */ @Test void deserializeRealMessageFromByte() { byte[] data = new byte[] { 0, 83, 112, -64, 10, 5, 64, 64, 112, 72, 25, 8, 0, 64, 67, 0, 83, 113, -63, 36, 2, -93, 16, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 45, 116, 111, 107, 101, 110, -104, -99, -119, 88, -41, -124, -37, 69, 10, -98, -95, -99, 119, -64, -61, 36, 90, 0, 83, 114, -63, 85, 6, -93, 19, 120, 45, 111, 112, 116, 45, 101, 110, 113, 117, 101, 117, 101, 100, 45, 116, 105, 109, 101, -125, 0, 0, 1, 127, 42, -30, 45, 43, -93, 21, 120, 45, 111, 112, 116, 45, 115, 101, 113, 117, 101, 110, 99, 101, 45, 110, 117, 109, 98, 101, 114, 85, 78, -93, 18, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 101, 100, 45, 117, 110, 116, 
105, 108, -125, 0, 0, 1, 127, 42, -30, -94, 106, 0, 83, 115, -64, 63, 13, -95, 32, 53, 98, 100, 50, 56, 100, 98, 97, 48, 56, 54, 99, 52, 98, 57, 99, 98, 55, 55, 49, 99, 100, 97, 97, 101, 102, 52, 51, 102, 102, 49, 98, 64, 64, 64, 64, 64, 64, 64, -125, 0, 0, 1, 127, 114, -5, 53, 43, -125, 0, 0, 1, 127, 42, -30, 45, 43, 64, 64, 64, 0, 83, 116, -63, -118, 6, -95, 4, 116, 105, 109, 101, 0, -93, 29, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 100, 97, 116, 101, 116, 105, 109, 101, 45, 111, 102, 102, 115, 101, 116, -127, 8, -39, -9, -79, -6, -116, -83, 40, -95, 4, 115, 112, 97, 110, 0, -93, 22, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 116, 105, 109, 101, 115, 112, 97, 110, -127, 0, 0, 0, 0, 5, -11, -31, 0, -95, 3, 117, 114, 105, 0, -93, 17, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 117, 114, 105, -95, 23, 104, 116, 116, 112, 115, 58, 47, 47, 119, 119, 119, 46, 103, 105, 116, 104, 117, 98, 46, 99, 111, 109, 47, 0, 83, 117, -96, 12, 72, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 33 }; final Message amqpMessage = Proton.message(); amqpMessage.decode(data, 0, data.length); amqpMessage.setHeader(new Header()); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(amqpMessage, ServiceBusReceivedMessage.class); AtomicInteger convertCount = new AtomicInteger(); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("span", Duration.ofSeconds(2)); specificMap.put("time", OffsetDateTime.parse("2022-02-24T08:23:23.443127200Z")); assertEquals(specificMap.size(), actualMessage.getApplicationProperties().size()); specificMap.forEach((key, value) -> { Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); if (value instanceof URI) { assertEquals((URI) value, actualMessage.getApplicationProperties().get(key)); convertCount.getAndIncrement(); } else if (value instanceof Duration) { assertEquals((Duration) value, 
specificMap.get("span")); convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { assertEquals((OffsetDateTime) value, specificMap.get("time")); convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); } /** * Verifies that an empty collection is returned if the status code was not {@link AmqpResponseCode */ @Test void deserializeListMessagesNotOK() { final Map<String, Object> properties = new HashMap<>(); properties.put("status-code", AmqpResponseCode.FORBIDDEN.getValue()); final Message message = Proton.message(); message.setBody(new AmqpValue("test")); message.setApplicationProperties(new ApplicationProperties(properties)); final List<ServiceBusReceivedMessage> actual = serializer.deserializeList(message, ServiceBusReceivedMessage.class); Assertions.assertNotNull(actual); Assertions.assertTrue(actual.isEmpty()); } private void assertValues(Map<Symbol, Object> expected, Map<String, Object> actual) { assertEquals(expected.size(), actual.size()); for (Map.Entry<Symbol, Object> expectedEntry : expected.entrySet()) { assertEquals(expectedEntry.getValue(), actual.get(expectedEntry.getKey().toString())); } } }
OK. URL does not allow non-ascii. So you are safe here.
void serializeMessageWithSpecificApplicationProperties() { String contents = "some contents"; String messageId = "messageId"; final ServiceBusMessage message = getServiceBusMessage(contents, messageId); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("duration", Duration.ZERO); specificMap.put("offsetDateTime", OffsetDateTime.now()); message.getApplicationProperties().putAll(specificMap); Message amqpMessage = serializer.serialize(message); assertEquals(specificMap.size(), amqpMessage.getApplicationProperties().getValue().size()); AtomicInteger convertCount = new AtomicInteger(); specificMap.forEach((key, value) -> { Assertions.assertTrue(amqpMessage.getApplicationProperties().getValue().containsKey(key)); if (value instanceof URI) { assertEquals(((URI) value).toString(), ((ServiceBusDescribedType) amqpMessage.getApplicationProperties().getValue().get(key)).getDescribed()); convertCount.getAndIncrement(); } else if (value instanceof Duration) { convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); }
String contents = "some contents";
void serializeMessageWithSpecificApplicationProperties() { String contents = "some contents"; String messageId = "messageId"; final ServiceBusMessage message = getServiceBusMessage(contents, messageId); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("duration", Duration.ZERO); specificMap.put("offsetDateTime", OffsetDateTime.now()); message.getApplicationProperties().putAll(specificMap); Message amqpMessage = serializer.serialize(message); assertEquals(specificMap.size(), amqpMessage.getApplicationProperties().getValue().size()); AtomicInteger convertCount = new AtomicInteger(); specificMap.forEach((key, value) -> { Assertions.assertTrue(amqpMessage.getApplicationProperties().getValue().containsKey(key)); if (value instanceof URI) { assertEquals(((URI) value).toString(), ((ServiceBusDescribedType) amqpMessage.getApplicationProperties().getValue().get(key)).getDescribed()); convertCount.getAndIncrement(); } else if (value instanceof Duration) { convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); }
class ServiceBusMessageSerializerTest { private final ServiceBusMessageSerializer serializer = new ServiceBusMessageSerializer(); @Test void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, ServiceBusMessage.class)); } @Test void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type Message. */ @Test void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test void cannotDeserializeObject() { final org.apache.qpid.proton.message.Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, ServiceBusReceiverAsyncClient.class)); assertThrows(IllegalArgumentException.class, () -> serializer.deserializeList(message, ServiceBusReceiverAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link ServiceBusMessage}. 
*/ @Test void deserializeMessage() { final String payload = "hello-world"; final byte[] payloadBytes = payload.getBytes(UTF_8); final org.apache.qpid.proton.message.Message message = getMessage(payloadBytes); message.setAddress("a-to-address"); message.setContentType("some-content-type"); message.setCorrelationId("correlation-id-test"); message.setDeliveryCount(10); message.setTtl(1045); message.setMessageId("a-test-message-id"); message.setSubject("this is a label"); message.getProperties().setTo("this is a to property"); message.setReplyTo("reply-to-property"); message.setReplyToGroupId("reply-to-session-id-property"); message.setGroupId("session-id-as-a-group-id"); Map<Symbol, Object> expectedMessageAnnotations = message.getMessageAnnotations().getValue(); expectedMessageAnnotations.put(Symbol.valueOf("A"), "A value"); Map<Symbol, Object> expectedDeliveryAnnotations = new HashMap<>(); expectedDeliveryAnnotations.put(Symbol.valueOf("D"), "D value"); message.setDeliveryAnnotations(new DeliveryAnnotations(expectedDeliveryAnnotations)); Map<Symbol, Object> expectedFooterValues = new HashMap<>(); expectedFooterValues.put(Symbol.valueOf("footer1"), "footer value"); message.setFooter(new Footer(expectedFooterValues)); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(message, ServiceBusReceivedMessage.class); assertNotNull(actualMessage.getEnqueuedTime()); assertEquals(SEQUENCE_NUMBER, actualMessage.getSequenceNumber()); assertEquals(message.getTtl(), actualMessage.getTimeToLive().toMillis()); assertEquals(message.getSubject(), actualMessage.getSubject()); assertEquals(message.getReplyTo(), actualMessage.getReplyTo()); assertEquals(message.getDeliveryCount(), actualMessage.getDeliveryCount()); assertEquals(message.getProperties().getTo(), actualMessage.getTo()); assertEquals(message.getReplyToGroupId(), actualMessage.getReplyToSessionId()); assertEquals(message.getGroupId(), actualMessage.getSessionId()); assertEquals(message.getContentType(), 
actualMessage.getContentType()); assertEquals(message.getCorrelationId(), actualMessage.getCorrelationId()); assertValues(expectedMessageAnnotations, actualMessage.getRawAmqpMessage().getMessageAnnotations()); assertValues(expectedDeliveryAnnotations, actualMessage.getRawAmqpMessage().getDeliveryAnnotations()); assertValues(expectedFooterValues, actualMessage.getRawAmqpMessage().getFooter()); assertEquals(APPLICATION_PROPERTIES.size(), actualMessage.getApplicationProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); assertEquals(value, actualMessage.getApplicationProperties().get(key)); }); assertEquals(payload, actualMessage.getBody().toString()); } /** * Verifies that we can serialize OffsetDateTime, Duration and URI in application properties. */ @Test /** * Message with specific type send from .net SDK. * * ServiceBusMessage message = new ServiceBusMessage("Hello world!"); * DateTime utcTime1 = DateTime.Parse("2022-02-24T08:23:23.443127200Z"); * utcTime1 = DateTime.SpecifyKind(utcTime1, DateTimeKind.Utc); * message.ApplicationProperties.Add("time", utcTime2); * message.ApplicationProperties.Add("span", TimeSpan.FromSeconds(10)); * message.ApplicationProperties.Add("uri", new Uri("https: */ @Test void deserializeRealMessageFromByte() { byte[] data = new byte[] { 0, 83, 112, -64, 10, 5, 64, 64, 112, 72, 25, 8, 0, 64, 67, 0, 83, 113, -63, 36, 2, -93, 16, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 45, 116, 111, 107, 101, 110, -104, -99, -119, 88, -41, -124, -37, 69, 10, -98, -95, -99, 119, -64, -61, 36, 90, 0, 83, 114, -63, 85, 6, -93, 19, 120, 45, 111, 112, 116, 45, 101, 110, 113, 117, 101, 117, 101, 100, 45, 116, 105, 109, 101, -125, 0, 0, 1, 127, 42, -30, 45, 43, -93, 21, 120, 45, 111, 112, 116, 45, 115, 101, 113, 117, 101, 110, 99, 101, 45, 110, 117, 109, 98, 101, 114, 85, 78, -93, 18, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 101, 100, 45, 117, 110, 116, 
105, 108, -125, 0, 0, 1, 127, 42, -30, -94, 106, 0, 83, 115, -64, 63, 13, -95, 32, 53, 98, 100, 50, 56, 100, 98, 97, 48, 56, 54, 99, 52, 98, 57, 99, 98, 55, 55, 49, 99, 100, 97, 97, 101, 102, 52, 51, 102, 102, 49, 98, 64, 64, 64, 64, 64, 64, 64, -125, 0, 0, 1, 127, 114, -5, 53, 43, -125, 0, 0, 1, 127, 42, -30, 45, 43, 64, 64, 64, 0, 83, 116, -63, -118, 6, -95, 4, 116, 105, 109, 101, 0, -93, 29, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 100, 97, 116, 101, 116, 105, 109, 101, 45, 111, 102, 102, 115, 101, 116, -127, 8, -39, -9, -79, -6, -116, -83, 40, -95, 4, 115, 112, 97, 110, 0, -93, 22, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 116, 105, 109, 101, 115, 112, 97, 110, -127, 0, 0, 0, 0, 5, -11, -31, 0, -95, 3, 117, 114, 105, 0, -93, 17, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 117, 114, 105, -95, 23, 104, 116, 116, 112, 115, 58, 47, 47, 119, 119, 119, 46, 103, 105, 116, 104, 117, 98, 46, 99, 111, 109, 47, 0, 83, 117, -96, 12, 72, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 33 }; final Message amqpMessage = Proton.message(); amqpMessage.decode(data, 0, data.length); amqpMessage.setHeader(new Header()); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(amqpMessage, ServiceBusReceivedMessage.class); AtomicInteger convertCount = new AtomicInteger(); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("span", Duration.ofSeconds(2)); specificMap.put("time", OffsetDateTime.parse("2022-02-24T08:23:23.443127200Z")); assertEquals(specificMap.size(), actualMessage.getApplicationProperties().size()); specificMap.forEach((key, value) -> { Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); if (value instanceof URI) { assertEquals((URI) value, actualMessage.getApplicationProperties().get(key)); convertCount.getAndIncrement(); } else if (value instanceof Duration) { assertEquals((Duration) value, 
specificMap.get("span")); convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { assertEquals((OffsetDateTime) value, specificMap.get("time")); convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); } /** * Verifies that an empty collection is returned if the status code was not {@link AmqpResponseCode */ @Test void deserializeListMessagesNotOK() { final Map<String, Object> properties = new HashMap<>(); properties.put("status-code", AmqpResponseCode.FORBIDDEN.getValue()); final Message message = Proton.message(); message.setBody(new AmqpValue("test")); message.setApplicationProperties(new ApplicationProperties(properties)); final List<ServiceBusReceivedMessage> actual = serializer.deserializeList(message, ServiceBusReceivedMessage.class); Assertions.assertNotNull(actual); Assertions.assertTrue(actual.isEmpty()); } private void assertValues(Map<Symbol, Object> expected, Map<String, Object> actual) { assertEquals(expected.size(), actual.size()); for (Map.Entry<Symbol, Object> expectedEntry : expected.entrySet()) { assertEquals(expectedEntry.getValue(), actual.get(expectedEntry.getKey().toString())); } } }
class ServiceBusMessageSerializerTest { private final ServiceBusMessageSerializer serializer = new ServiceBusMessageSerializer(); @Test void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, ServiceBusMessage.class)); } @Test void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type Message. */ @Test void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test void cannotDeserializeObject() { final org.apache.qpid.proton.message.Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, ServiceBusReceiverAsyncClient.class)); assertThrows(IllegalArgumentException.class, () -> serializer.deserializeList(message, ServiceBusReceiverAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link ServiceBusMessage}. 
*/ @Test void deserializeMessage() { final String payload = "hello-world"; final byte[] payloadBytes = payload.getBytes(UTF_8); final org.apache.qpid.proton.message.Message message = getMessage(payloadBytes); message.setAddress("a-to-address"); message.setContentType("some-content-type"); message.setCorrelationId("correlation-id-test"); message.setDeliveryCount(10); message.setTtl(1045); message.setMessageId("a-test-message-id"); message.setSubject("this is a label"); message.getProperties().setTo("this is a to property"); message.setReplyTo("reply-to-property"); message.setReplyToGroupId("reply-to-session-id-property"); message.setGroupId("session-id-as-a-group-id"); Map<Symbol, Object> expectedMessageAnnotations = message.getMessageAnnotations().getValue(); expectedMessageAnnotations.put(Symbol.valueOf("A"), "A value"); Map<Symbol, Object> expectedDeliveryAnnotations = new HashMap<>(); expectedDeliveryAnnotations.put(Symbol.valueOf("D"), "D value"); message.setDeliveryAnnotations(new DeliveryAnnotations(expectedDeliveryAnnotations)); Map<Symbol, Object> expectedFooterValues = new HashMap<>(); expectedFooterValues.put(Symbol.valueOf("footer1"), "footer value"); message.setFooter(new Footer(expectedFooterValues)); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(message, ServiceBusReceivedMessage.class); assertNotNull(actualMessage.getEnqueuedTime()); assertEquals(SEQUENCE_NUMBER, actualMessage.getSequenceNumber()); assertEquals(message.getTtl(), actualMessage.getTimeToLive().toMillis()); assertEquals(message.getSubject(), actualMessage.getSubject()); assertEquals(message.getReplyTo(), actualMessage.getReplyTo()); assertEquals(message.getDeliveryCount(), actualMessage.getDeliveryCount()); assertEquals(message.getProperties().getTo(), actualMessage.getTo()); assertEquals(message.getReplyToGroupId(), actualMessage.getReplyToSessionId()); assertEquals(message.getGroupId(), actualMessage.getSessionId()); assertEquals(message.getContentType(), 
actualMessage.getContentType()); assertEquals(message.getCorrelationId(), actualMessage.getCorrelationId()); assertValues(expectedMessageAnnotations, actualMessage.getRawAmqpMessage().getMessageAnnotations()); assertValues(expectedDeliveryAnnotations, actualMessage.getRawAmqpMessage().getDeliveryAnnotations()); assertValues(expectedFooterValues, actualMessage.getRawAmqpMessage().getFooter()); assertEquals(APPLICATION_PROPERTIES.size(), actualMessage.getApplicationProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); assertEquals(value, actualMessage.getApplicationProperties().get(key)); }); assertEquals(payload, actualMessage.getBody().toString()); } /** * Verifies that we can serialize OffsetDateTime, Duration and URI in application properties. */ @Test /** * Message with specific type send from .net SDK. * * ServiceBusMessage message = new ServiceBusMessage("Hello world!"); * DateTime utcTime1 = DateTime.Parse("2022-02-24T08:23:23.443127200Z"); * utcTime1 = DateTime.SpecifyKind(utcTime1, DateTimeKind.Utc); * message.ApplicationProperties.Add("time", utcTime2); * message.ApplicationProperties.Add("span", TimeSpan.FromSeconds(10)); * message.ApplicationProperties.Add("uri", new Uri("https: */ @Test void deserializeRealMessageFromByte() { byte[] data = new byte[] { 0, 83, 112, -64, 10, 5, 64, 64, 112, 72, 25, 8, 0, 64, 67, 0, 83, 113, -63, 36, 2, -93, 16, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 45, 116, 111, 107, 101, 110, -104, -99, -119, 88, -41, -124, -37, 69, 10, -98, -95, -99, 119, -64, -61, 36, 90, 0, 83, 114, -63, 85, 6, -93, 19, 120, 45, 111, 112, 116, 45, 101, 110, 113, 117, 101, 117, 101, 100, 45, 116, 105, 109, 101, -125, 0, 0, 1, 127, 42, -30, 45, 43, -93, 21, 120, 45, 111, 112, 116, 45, 115, 101, 113, 117, 101, 110, 99, 101, 45, 110, 117, 109, 98, 101, 114, 85, 78, -93, 18, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 101, 100, 45, 117, 110, 116, 
105, 108, -125, 0, 0, 1, 127, 42, -30, -94, 106, 0, 83, 115, -64, 63, 13, -95, 32, 53, 98, 100, 50, 56, 100, 98, 97, 48, 56, 54, 99, 52, 98, 57, 99, 98, 55, 55, 49, 99, 100, 97, 97, 101, 102, 52, 51, 102, 102, 49, 98, 64, 64, 64, 64, 64, 64, 64, -125, 0, 0, 1, 127, 114, -5, 53, 43, -125, 0, 0, 1, 127, 42, -30, 45, 43, 64, 64, 64, 0, 83, 116, -63, -118, 6, -95, 4, 116, 105, 109, 101, 0, -93, 29, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 100, 97, 116, 101, 116, 105, 109, 101, 45, 111, 102, 102, 115, 101, 116, -127, 8, -39, -9, -79, -6, -116, -83, 40, -95, 4, 115, 112, 97, 110, 0, -93, 22, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 116, 105, 109, 101, 115, 112, 97, 110, -127, 0, 0, 0, 0, 5, -11, -31, 0, -95, 3, 117, 114, 105, 0, -93, 17, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 117, 114, 105, -95, 23, 104, 116, 116, 112, 115, 58, 47, 47, 119, 119, 119, 46, 103, 105, 116, 104, 117, 98, 46, 99, 111, 109, 47, 0, 83, 117, -96, 12, 72, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 33 }; final Message amqpMessage = Proton.message(); amqpMessage.decode(data, 0, data.length); amqpMessage.setHeader(new Header()); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(amqpMessage, ServiceBusReceivedMessage.class); AtomicInteger convertCount = new AtomicInteger(); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("span", Duration.ofSeconds(2)); specificMap.put("time", OffsetDateTime.parse("2022-02-24T08:23:23.443127200Z")); assertEquals(specificMap.size(), actualMessage.getApplicationProperties().size()); specificMap.forEach((key, value) -> { Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); if (value instanceof URI) { assertEquals((URI) value, actualMessage.getApplicationProperties().get(key)); convertCount.getAndIncrement(); } else if (value instanceof Duration) { assertEquals((Duration) value, 
specificMap.get("span")); convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { assertEquals((OffsetDateTime) value, specificMap.get("time")); convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); } /** * Verifies that an empty collection is returned if the status code was not {@link AmqpResponseCode */ @Test void deserializeListMessagesNotOK() { final Map<String, Object> properties = new HashMap<>(); properties.put("status-code", AmqpResponseCode.FORBIDDEN.getValue()); final Message message = Proton.message(); message.setBody(new AmqpValue("test")); message.setApplicationProperties(new ApplicationProperties(properties)); final List<ServiceBusReceivedMessage> actual = serializer.deserializeList(message, ServiceBusReceivedMessage.class); Assertions.assertNotNull(actual); Assertions.assertTrue(actual.isEmpty()); } private void assertValues(Map<Symbol, Object> expected, Map<String, Object> actual) { assertEquals(expected.size(), actual.size()); for (Map.Entry<Symbol, Object> expectedEntry : expected.entrySet()) { assertEquals(expectedEntry.getValue(), actual.get(expectedEntry.getKey().toString())); } } }
Yeah, sorry, I think URI only contains ASCII at that time, but it may contain other String. It should use `this.getDescribed().getBytes(StandardCharsets.UTF_8).length`
public int size() { return URI_SYMBOL.length() + ((String) this.getDescribed()).length(); }
return URI_SYMBOL.length() + ((String) this.getDescribed()).length();
public int size() { return URI_SYMBOL.length() + ((String) this.getDescribed()).getBytes(StandardCharsets.UTF_8).length; }
class UriDescribedType extends ServiceBusDescribedType { /** * Set described to describe data in described type. * * @param described real value in the described type. */ public UriDescribedType(Object described) { super(URI_SYMBOL, ((URI) described).toString()); } @Override }
class UriDescribedType extends ServiceBusDescribedType { /** * Set described to describe data in described type. * * @param uri set as described in DescribedType. */ public UriDescribedType(URI uri) { super(URI_SYMBOL, uri.toString()); } @Override }
Is it possible that the `value` is not a parsable double?
public boolean evaluate(FeatureFilterEvaluationContext context) { String value = String.valueOf(context.getParameters().get(PERCENTAGE_FILTER_SETTING)); boolean result = true; if (value.equals("null") || Double.parseDouble(value) < 0) { LOGGER.warn("The {} feature filter does not have a valid {} value for feature {}.", this.getClass().getSimpleName(), PERCENTAGE_FILTER_SETTING, context.getName()); result = false; } else { result = (Math.random() * 100) <= Double.parseDouble(value); } return result; }
if (value.equals("null") || Double.parseDouble(value) < 0) {
public boolean evaluate(FeatureFilterEvaluationContext context) { String value = String.valueOf(context.getParameters().get(PERCENTAGE_FILTER_SETTING)); boolean result = true; if (value.equals("null") || Double.parseDouble(value) < 0) { LOGGER.warn("The {} feature filter does not have a valid {} value for feature {}.", this.getClass().getSimpleName(), PERCENTAGE_FILTER_SETTING, context.getName()); result = false; } else { result = (Math.random() * 100) <= Double.parseDouble(value); } return result; }
class PercentageFilter implements FeatureFilter { private static final Logger LOGGER = LoggerFactory.getLogger(PercentageFilter.class); /** * Performs a percentage based evaluation to determine whether a feature is enabled. * * @param context The feature evaluation context. * @return True if the feature is enabled, false otherwise. */ @Override }
class PercentageFilter implements FeatureFilter { private static final Logger LOGGER = LoggerFactory.getLogger(PercentageFilter.class); /** * Performs a percentage based evaluation to determine whether a feature is enabled. * * @param context The feature evaluation context. * @return True if the feature is enabled, false otherwise. * @throws NumberFormatException if the percentage filter setting is not a parsable double */ @Override }
The Double.parseDouble will throw a NumberFormatException which we want thrown. We can update the JavaDoc to say as much.
public boolean evaluate(FeatureFilterEvaluationContext context) { String value = String.valueOf(context.getParameters().get(PERCENTAGE_FILTER_SETTING)); boolean result = true; if (value.equals("null") || Double.parseDouble(value) < 0) { LOGGER.warn("The {} feature filter does not have a valid {} value for feature {}.", this.getClass().getSimpleName(), PERCENTAGE_FILTER_SETTING, context.getName()); result = false; } else { result = (Math.random() * 100) <= Double.parseDouble(value); } return result; }
if (value.equals("null") || Double.parseDouble(value) < 0) {
public boolean evaluate(FeatureFilterEvaluationContext context) { String value = String.valueOf(context.getParameters().get(PERCENTAGE_FILTER_SETTING)); boolean result = true; if (value.equals("null") || Double.parseDouble(value) < 0) { LOGGER.warn("The {} feature filter does not have a valid {} value for feature {}.", this.getClass().getSimpleName(), PERCENTAGE_FILTER_SETTING, context.getName()); result = false; } else { result = (Math.random() * 100) <= Double.parseDouble(value); } return result; }
// NOTE(review): truncated context snippet — the @Override annotation has no following
// member; the evaluate(...) body was lost in extraction. Tokens kept verbatim.
class PercentageFilter implements FeatureFilter {
    private static final Logger LOGGER = LoggerFactory.getLogger(PercentageFilter.class);

    /**
     * Performs a percentage based evaluation to determine whether a feature is enabled.
     *
     * @param context The feature evaluation context.
     * @return True if the feature is enabled, false otherwise.
     */
    @Override
}
// NOTE(review): truncated context snippet (post-review variant adding the @throws tag) —
// the @Override annotation has no following member; the evaluate(...) body was lost in
// extraction. Tokens kept verbatim.
class PercentageFilter implements FeatureFilter {
    private static final Logger LOGGER = LoggerFactory.getLogger(PercentageFilter.class);

    /**
     * Performs a percentage based evaluation to determine whether a feature is enabled.
     *
     * @param context The feature evaluation context.
     * @return True if the feature is enabled, false otherwise.
     * @throws NumberFormatException if the percentage filter setting is not a parsable double
     */
    @Override
}
nit: please remove the commented-out line
/**
 * Exercises InvalidKeyException handling while building the protected data encryption key.
 * First stubbing throws once then returns a real key and initialization is expected to
 * complete; the second stubbing throws three consecutive times and
 * initializeEncryptionSettingsAsync is expected to fail with InvalidKeyException as the cause.
 * NOTE(review): the exact retry count is inferred from the stubbing sequence — confirm
 * against EncryptionProcessor's retry logic.
 */
public void invalidClientEncryptionKeyException() throws Exception {
    CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class);
    CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class);
    Mockito.when(cosmosEncryptionAsyncClient.getEncryptionKeyWrapProvider()).thenReturn(keyStoreProvider);
    // Container properties carry a client-encryption policy (see generateContainerWithCosmosEncryptionPolicy).
    Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient,
        Mockito.any(CosmosAsyncContainer.class),
        Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy()));
    CosmosClientEncryptionKeyProperties keyProperties = generateClientEncryptionKeyProperties();
    Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient,
        Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class),
        Mockito.anyBoolean(), Mockito.nullable(String.class),
        Mockito.anyBoolean())).thenReturn(Mono.just(keyProperties));
    EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient);
    EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings();
    EncryptionSettings mockEncryptionSettings = Mockito.mock(EncryptionSettings.class);
    ReflectionUtils.setEncryptionSettings(encryptionProcessor, mockEncryptionSettings);
    // Stub 1: fail once, then return a real key built from the un-mocked settings.
    Mockito.when(mockEncryptionSettings.buildProtectedDataEncryptionKey(Mockito.any(CosmosClientEncryptionKeyProperties.class),
        Mockito.any(EncryptionKeyStoreProvider.class), Mockito.anyString())).
        thenThrow(new InvalidKeyException()).thenReturn(encryptionSettings.buildProtectedDataEncryptionKey(keyProperties,
        encryptionKeyWrapProviderAccessor.getEncryptionKeyStoreProviderImpl(keyStoreProvider),
        keyProperties.getId()));
    Mockito.doNothing().when(mockEncryptionSettings).setEncryptionSettingForProperty(Mockito.anyString(),
        Mockito.any(EncryptionSettings.class), Mockito.any(Instant.class));
    Assertions.assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse();
    // Expected to succeed despite the single InvalidKeyException.
    encryptionProcessor.initializeEncryptionSettingsAsync(false).block();
    // Fresh processor; this time the key build fails three times in a row.
    encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient);
    encryptionSettings = encryptionProcessor.getEncryptionSettings();
    mockEncryptionSettings = Mockito.mock(EncryptionSettings.class);
    ReflectionUtils.setEncryptionSettings(encryptionProcessor, mockEncryptionSettings);
    Mockito.when(mockEncryptionSettings.buildProtectedDataEncryptionKey(Mockito.any(CosmosClientEncryptionKeyProperties.class),
        Mockito.any(EncryptionKeyStoreProvider.class), Mockito.anyString())).
        thenThrow(new InvalidKeyException(), new InvalidKeyException(), new InvalidKeyException()).thenReturn(encryptionSettings.buildProtectedDataEncryptionKey(keyProperties,
        encryptionKeyWrapProviderAccessor.getEncryptionKeyStoreProviderImpl(keyStoreProvider),
        keyProperties.getId()));
    try {
        encryptionProcessor.initializeEncryptionSettingsAsync(false).block();
        fail("Expecting initializeEncryptionSettingsAsync to throw InvalidKeyException");
    } catch (Exception ex) {
        // The reactive pipeline wraps the failure; the cause must be the InvalidKeyException.
        InvalidKeyException invalidKeyException = Utils.as(ex.getCause(), InvalidKeyException.class);
        assertThat(invalidKeyException).isNotNull();
    }
}
/**
 * Post-review variant of the InvalidKeyException test using the KeyEncryptionKeyResolver
 * API: the key store provider is now an EncryptionKeyStoreProviderImpl obtained through
 * the client accessor. One InvalidKeyException is expected to be tolerated; three
 * consecutive ones must fail initializeEncryptionSettingsAsync with InvalidKeyException
 * as the cause.
 * NOTE(review): the exact retry count is inferred from the stubbing sequence — confirm
 * against EncryptionProcessor's retry logic.
 */
public void invalidClientEncryptionKeyException() throws Exception {
    CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class);
    CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class);
    Mockito.when(cosmosEncryptionAsyncClient.getKeyEncryptionKeyResolver()).thenReturn(keyEncryptionKeyResolver);
    // Container properties carry a client-encryption policy (see generateContainerWithCosmosEncryptionPolicy).
    Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient,
        Mockito.any(CosmosAsyncContainer.class),
        Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy()));
    CosmosClientEncryptionKeyProperties keyProperties = generateClientEncryptionKeyProperties();
    Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient,
        Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class),
        Mockito.anyBoolean(), Mockito.nullable(String.class),
        Mockito.anyBoolean())).thenReturn(Mono.just(keyProperties));
    // Real key-store provider wrapping the test resolver, handed out by the accessor stub.
    EncryptionKeyStoreProviderImpl encryptionKeyStoreProvider = new EncryptionKeyStoreProviderImpl(keyEncryptionKeyResolver, "TEST_KEY_RESOLVER");
    Mockito.when(cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient)).thenReturn(encryptionKeyStoreProvider);
    EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient);
    EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings();
    EncryptionSettings mockEncryptionSettings = Mockito.mock(EncryptionSettings.class);
    ReflectionUtils.setEncryptionSettings(encryptionProcessor, mockEncryptionSettings);
    // Stub 1: fail once, then return a real key built from the un-mocked settings.
    Mockito.when(mockEncryptionSettings.buildProtectedDataEncryptionKey(Mockito.any(CosmosClientEncryptionKeyProperties.class),
        Mockito.any(EncryptionKeyStoreProvider.class), Mockito.anyString())).
        thenThrow(new InvalidKeyException()).thenReturn(encryptionSettings.buildProtectedDataEncryptionKey(keyProperties,
        cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient),
        keyProperties.getId()));
    Mockito.doNothing().when(mockEncryptionSettings).setEncryptionSettingForProperty(Mockito.anyString(),
        Mockito.any(EncryptionSettings.class), Mockito.any(Instant.class));
    Assertions.assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse();
    // Expected to succeed despite the single InvalidKeyException.
    encryptionProcessor.initializeEncryptionSettingsAsync(false).block();
    // Fresh processor; this time the key build fails three times in a row.
    encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient);
    encryptionSettings = encryptionProcessor.getEncryptionSettings();
    mockEncryptionSettings = Mockito.mock(EncryptionSettings.class);
    ReflectionUtils.setEncryptionSettings(encryptionProcessor, mockEncryptionSettings);
    Mockito.when(mockEncryptionSettings.buildProtectedDataEncryptionKey(Mockito.any(CosmosClientEncryptionKeyProperties.class),
        Mockito.any(EncryptionKeyStoreProvider.class), Mockito.anyString())).
        thenThrow(new InvalidKeyException(), new InvalidKeyException(), new InvalidKeyException()).thenReturn(encryptionSettings.buildProtectedDataEncryptionKey(keyProperties,
        cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient),
        keyProperties.getId()));
    try {
        encryptionProcessor.initializeEncryptionSettingsAsync(false).block();
        fail("Expecting initializeEncryptionSettingsAsync to throw InvalidKeyException");
    } catch (Exception ex) {
        // The reactive pipeline wraps the failure; the cause must be the InvalidKeyException.
        InvalidKeyException invalidKeyException = Utils.as(ex.getCause(), InvalidKeyException.class);
        assertThat(invalidKeyException).isNotNull();
    }
}
/**
 * Unit tests for EncryptionProcessor / EncryptionSettings initialization and the
 * per-property encryption-setting cache (pre-key-resolver variant using
 * TestEncryptionKeyStoreProvider).
 */
class EncryptionProcessorAndSettingsTest {
    private static final int TIMEOUT = 6000_000;
    private static final ObjectMapper MAPPER = new ObjectMapper();
    private static final EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider keyStoreProvider = new EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider();
    // Bridge-helper accessors used to reach package-private SDK internals.
    private final static EncryptionImplementationBridgeHelpers.EncryptionKeyWrapProviderHelper.EncryptionKeyWrapProviderAccessor encryptionKeyWrapProviderAccessor = EncryptionImplementationBridgeHelpers.EncryptionKeyWrapProviderHelper.getEncryptionKeyWrapProviderAccessor();
    private final static ImplementationBridgeHelpers.CosmosContainerPropertiesHelper.CosmosContainerPropertiesAccessor cosmosContainerPropertiesAccessor = ImplementationBridgeHelpers.CosmosContainerPropertiesHelper.getCosmosContainerPropertiesAccessor();
    private final static EncryptionImplementationBridgeHelpers.CosmosEncryptionAsyncClientHelper.CosmosEncryptionAsyncClientAccessor cosmosEncryptionAsyncClientAccessor = EncryptionImplementationBridgeHelpers.CosmosEncryptionAsyncClientHelper.getCosmosEncryptionAsyncClientAccessor();

    // Cache is empty before initializeEncryptionSettingsAsync and holds key id "key1"
    // for "sensitiveString" afterwards.
    @Test(groups = {"unit"}, timeOut = TIMEOUT)
    public void initializeEncryptionSettingsAsync() throws Exception {
        CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class);
        CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class);
        Mockito.when(cosmosEncryptionAsyncClient.getEncryptionKeyWrapProvider()).thenReturn(keyStoreProvider);
        Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient,
            Mockito.any(CosmosAsyncContainer.class),
            Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy()));
        Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient,
            Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(),
            Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties()));
        EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient);
        EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings();
        try {
            // Before initialization the cache lookup must blow up (empty cache).
            encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block();
            fail("encryptionSettings should be empty");
        } catch (NullPointerException ex) {
        }
        Assertions.assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse();
        encryptionProcessor.initializeEncryptionSettingsAsync(false).block();
        encryptionSettings = encryptionProcessor.getEncryptionSettings();
        CachedEncryptionSettings cachedEncryptionSettings = encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block();
        assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor)).isTrue();
        assertThat(cachedEncryptionSettings).isNotNull();
        assertThat(cachedEncryptionSettings.getEncryptionSettings().getClientEncryptionKeyId()).isEqualTo("key1");
    }

    // Lazy path: getEncryptionSettingForPropertyAsync populates the cache on first use
    // (exactly one fetch) and serves the second call from cache.
    @Test(groups = {"unit"}, timeOut = TIMEOUT)
    public void withoutInitializeEncryptionSettingsAsync() throws Exception {
        CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class);
        CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class);
        Mockito.when(cosmosEncryptionAsyncClient.getEncryptionKeyWrapProvider()).thenReturn(keyStoreProvider);
        Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient,
            Mockito.any(CosmosAsyncContainer.class),
            Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy()));
        Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient,
            Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(),
            Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties()));
        EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient);
        EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings();
        encryptionSettings.setDatabaseRid("TestDb");
        try {
            // Cache starts empty.
            encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block();
            fail("encryptionSettings should be empty");
        } catch (NullPointerException ex) {
        }
        EncryptionSettings spyEncryptionSettings = Mockito.spy(encryptionSettings);
        EncryptionSettings cachedEncryptionSettings = spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block();
        assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse();
        assertThat(cachedEncryptionSettings).isNotNull();
        assertThat(cachedEncryptionSettings.getClientEncryptionKeyId()).isEqualTo("key1");
        Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class));
        // Second lookup must be served from cache: fetch count stays at 1.
        spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block();
        Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class));
    }

    // TTL: a fresh entry lives ~60 minutes; forcing the TTL to Instant.now() makes the
    // next lookup re-fetch (fetch count goes to 2).
    @Test(groups = {"unit"}, timeOut = TIMEOUT)
    public void encryptionSettingCachedTimeToLive() throws Exception {
        CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class);
        CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class);
        Mockito.when(cosmosEncryptionAsyncClient.getEncryptionKeyWrapProvider()).thenReturn(keyStoreProvider);
        Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient,
            Mockito.any(CosmosAsyncContainer.class),
            Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy()));
        Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient,
            Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(),
            Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties()));
        EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient);
        EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings();
        encryptionSettings.setDatabaseRid("TestDb");
        EncryptionSettings spyEncryptionSettings = Mockito.spy(encryptionSettings);
        EncryptionSettings cachedEncryptionSettings = spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block();
        Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class));
        assertThat(cachedEncryptionSettings).isNotNull();
        assertThat(cachedEncryptionSettings.getClientEncryptionKeyId()).isEqualTo("key1");
        assertThat(cachedEncryptionSettings.getEncryptionSettingTimeToLive()).isAfter(Instant.now().plus(Duration.ofMinutes(59)));
        assertThat(cachedEncryptionSettings.getEncryptionSettingTimeToLive()).isBefore(Instant.now().plus(Duration.ofMinutes(61)));
        // Expire the entry, then verify the next lookup triggers a second fetch.
        spyEncryptionSettings.setEncryptionSettingForProperty("sensitiveString", cachedEncryptionSettings, Instant.now());
        spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block();
        Mockito.verify(spyEncryptionSettings, Mockito.times(2)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class));
    }

    // NOTE(review): this @Test annotation sits on a private fixture factory — it looks like
    // a leftover from a removed test (or extraction artifact); confirm and remove. Tokens
    // kept verbatim here.
    @Test(groups = {"unit"}, timeOut = TIMEOUT)
    private ClientEncryptionPolicy generateClientEncryptionPolicy() {
        ClientEncryptionIncludedPath includedPath1 = new ClientEncryptionIncludedPath();
        includedPath1.setClientEncryptionKeyId("key1");
        includedPath1.setPath("/sensitiveString");
        includedPath1.setEncryptionType(CosmosEncryptionType.DETERMINISTIC.getName());
        includedPath1.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName());
        List<ClientEncryptionIncludedPath> paths = new ArrayList<>();
        paths.add(includedPath1);
        return new ClientEncryptionPolicy(paths);
    }

    // Container fixture with a deterministic encryption policy on "/sensitiveString".
    private CosmosContainerProperties generateContainerWithCosmosEncryptionPolicy() {
        CosmosContainerProperties containerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), "/mypk");
        cosmosContainerPropertiesAccessor.setSelfLink(containerProperties, "dbs/testDb/colls/testCol");
        ClientEncryptionIncludedPath includedPath1 = new ClientEncryptionIncludedPath();
        includedPath1.setClientEncryptionKeyId("key1");
        includedPath1.setPath("/sensitiveString");
        includedPath1.setEncryptionType(CosmosEncryptionType.DETERMINISTIC.getName());
        includedPath1.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName());
        List<ClientEncryptionIncludedPath> paths = new ArrayList<>();
        paths.add(includedPath1);
        return containerProperties.setClientEncryptionPolicy(new ClientEncryptionPolicy(paths));
    }

    // Client-encryption-key fixture ("key1") with a fixed base64 wrapped-key payload.
    private CosmosClientEncryptionKeyProperties generateClientEncryptionKeyProperties() throws JsonProcessingException {
        TextNode treeNode = new TextNode("S84PieiyZNyHxeuUuX5IXSV2KOktpt02tQM4QLhm8dI=");
        byte[] key = MAPPER.treeToValue(treeNode, byte[].class);
        EncryptionKeyWrapMetadata metadata = new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key1", "tempmetadata1");
        return new CosmosClientEncryptionKeyProperties("key1", CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName(), key, metadata);
    }
}
/**
 * Unit tests for EncryptionProcessor / EncryptionSettings initialization and the
 * per-property encryption-setting cache (post-review variant using
 * TestKeyEncryptionKeyResolver + EncryptionKeyStoreProviderImpl).
 */
class EncryptionProcessorAndSettingsTest {
    private static final int TIMEOUT = 6000_000;
    private static final ObjectMapper MAPPER = new ObjectMapper();
    private static final EncryptionAsyncApiCrudTest.TestKeyEncryptionKeyResolver keyEncryptionKeyResolver = new EncryptionAsyncApiCrudTest.TestKeyEncryptionKeyResolver();
    // Bridge-helper accessors used to reach package-private SDK internals.
    private final static ImplementationBridgeHelpers.CosmosContainerPropertiesHelper.CosmosContainerPropertiesAccessor cosmosContainerPropertiesAccessor = ImplementationBridgeHelpers.CosmosContainerPropertiesHelper.getCosmosContainerPropertiesAccessor();
    private final static EncryptionImplementationBridgeHelpers.CosmosEncryptionAsyncClientHelper.CosmosEncryptionAsyncClientAccessor cosmosEncryptionAsyncClientAccessor = EncryptionImplementationBridgeHelpers.CosmosEncryptionAsyncClientHelper.getCosmosEncryptionAsyncClientAccessor();

    // Cache is empty before initializeEncryptionSettingsAsync and holds key id "key1"
    // for "sensitiveString" afterwards.
    @Test(groups = {"unit"}, timeOut = TIMEOUT)
    public void initializeEncryptionSettingsAsync() throws Exception {
        CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class);
        CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class);
        Mockito.when(cosmosEncryptionAsyncClient.getKeyEncryptionKeyResolver()).thenReturn(keyEncryptionKeyResolver);
        Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient,
            Mockito.any(CosmosAsyncContainer.class),
            Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy()));
        Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient,
            Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(),
            Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties()));
        // Real key-store provider wrapping the test resolver, handed out by the accessor stub.
        EncryptionKeyStoreProviderImpl encryptionKeyStoreProvider = new EncryptionKeyStoreProviderImpl(keyEncryptionKeyResolver, "TEST_KEY_RESOLVER");
        Mockito.when(cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient)).thenReturn(encryptionKeyStoreProvider);
        EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient);
        EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings();
        try {
            // Before initialization the cache lookup must blow up (empty cache).
            encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block();
            fail("encryptionSettings should be empty");
        } catch (NullPointerException ex) {
        }
        Assertions.assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse();
        encryptionProcessor.initializeEncryptionSettingsAsync(false).block();
        encryptionSettings = encryptionProcessor.getEncryptionSettings();
        CachedEncryptionSettings cachedEncryptionSettings = encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block();
        assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor)).isTrue();
        assertThat(cachedEncryptionSettings).isNotNull();
        assertThat(cachedEncryptionSettings.getEncryptionSettings().getClientEncryptionKeyId()).isEqualTo("key1");
    }

    // Lazy path: getEncryptionSettingForPropertyAsync populates the cache on first use
    // (exactly one fetch) and serves the second call from cache.
    @Test(groups = {"unit"}, timeOut = TIMEOUT)
    public void withoutInitializeEncryptionSettingsAsync() throws Exception {
        CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class);
        CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class);
        Mockito.when(cosmosEncryptionAsyncClient.getKeyEncryptionKeyResolver()).thenReturn(keyEncryptionKeyResolver);
        Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient,
            Mockito.any(CosmosAsyncContainer.class),
            Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy()));
        Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient,
            Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(),
            Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties()));
        EncryptionKeyStoreProviderImpl encryptionKeyStoreProvider = new EncryptionKeyStoreProviderImpl(keyEncryptionKeyResolver, "TEST_KEY_RESOLVER");
        Mockito.when(cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient)).thenReturn(encryptionKeyStoreProvider);
        EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient);
        EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings();
        encryptionSettings.setDatabaseRid("TestDb");
        try {
            // Cache starts empty.
            encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block();
            fail("encryptionSettings should be empty");
        } catch (NullPointerException ex) {
        }
        EncryptionSettings spyEncryptionSettings = Mockito.spy(encryptionSettings);
        EncryptionSettings cachedEncryptionSettings = spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block();
        assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse();
        assertThat(cachedEncryptionSettings).isNotNull();
        assertThat(cachedEncryptionSettings.getClientEncryptionKeyId()).isEqualTo("key1");
        Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class));
        // Second lookup must be served from cache: fetch count stays at 1.
        spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block();
        Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class));
    }

    // TTL: a fresh entry lives ~60 minutes; backdating the TTL by 5 seconds makes the
    // next lookup re-fetch (fetch count goes to 2).
    @Test(groups = {"unit"}, timeOut = TIMEOUT)
    public void encryptionSettingCachedTimeToLive() throws Exception {
        CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class);
        CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class);
        Mockito.when(cosmosEncryptionAsyncClient.getKeyEncryptionKeyResolver()).thenReturn(keyEncryptionKeyResolver);
        Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient,
            Mockito.any(CosmosAsyncContainer.class),
            Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy()));
        Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient,
            Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(),
            Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties()));
        EncryptionKeyStoreProviderImpl encryptionKeyStoreProvider = new EncryptionKeyStoreProviderImpl(keyEncryptionKeyResolver, "TEST_KEY_RESOLVER");
        Mockito.when(cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient)).thenReturn(encryptionKeyStoreProvider);
        EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient);
        EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings();
        encryptionSettings.setDatabaseRid("TestDb");
        EncryptionSettings spyEncryptionSettings = Mockito.spy(encryptionSettings);
        EncryptionSettings cachedEncryptionSettings = spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block();
        Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class));
        assertThat(cachedEncryptionSettings).isNotNull();
        assertThat(cachedEncryptionSettings.getClientEncryptionKeyId()).isEqualTo("key1");
        assertThat(cachedEncryptionSettings.getEncryptionSettingTimeToLive()).isAfter(Instant.now().plus(Duration.ofMinutes(59)));
        assertThat(cachedEncryptionSettings.getEncryptionSettingTimeToLive()).isBefore(Instant.now().plus(Duration.ofMinutes(61)));
        // Expire the entry (backdated 5s), then verify the next lookup triggers a second fetch.
        spyEncryptionSettings.setEncryptionSettingForProperty("sensitiveString", cachedEncryptionSettings, Instant.now().minus(Duration.ofSeconds(5)));
        spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block();
        Mockito.verify(spyEncryptionSettings, Mockito.times(2)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class));
    }

    // NOTE(review): this @Test annotation sits on a private fixture factory — it looks like
    // a leftover from a removed test (or extraction artifact); confirm and remove. Tokens
    // kept verbatim here.
    @Test(groups = {"unit"}, timeOut = TIMEOUT)
    private ClientEncryptionPolicy generateClientEncryptionPolicy() {
        ClientEncryptionIncludedPath includedPath1 = new ClientEncryptionIncludedPath();
        includedPath1.setClientEncryptionKeyId("key1");
        includedPath1.setPath("/sensitiveString");
        includedPath1.setEncryptionType(CosmosEncryptionType.DETERMINISTIC.getName());
        includedPath1.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName());
        List<ClientEncryptionIncludedPath> paths = new ArrayList<>();
        paths.add(includedPath1);
        return new ClientEncryptionPolicy(paths);
    }

    // Container fixture with a deterministic encryption policy on "/sensitiveString".
    private CosmosContainerProperties generateContainerWithCosmosEncryptionPolicy() {
        CosmosContainerProperties containerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), "/mypk");
        cosmosContainerPropertiesAccessor.setSelfLink(containerProperties, "dbs/testDb/colls/testCol");
        ClientEncryptionIncludedPath includedPath1 = new ClientEncryptionIncludedPath();
        includedPath1.setClientEncryptionKeyId("key1");
        includedPath1.setPath("/sensitiveString");
        includedPath1.setEncryptionType(CosmosEncryptionType.DETERMINISTIC.getName());
        includedPath1.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName());
        List<ClientEncryptionIncludedPath> paths = new ArrayList<>();
        paths.add(includedPath1);
        return containerProperties.setClientEncryptionPolicy(new ClientEncryptionPolicy(paths));
    }

    // Client-encryption-key fixture ("key1"); metadata now names the resolver and the
    // "RSA-OAEP" wrap algorithm explicitly.
    private CosmosClientEncryptionKeyProperties generateClientEncryptionKeyProperties() throws JsonProcessingException {
        TextNode treeNode = new TextNode("S84PieiyZNyHxeuUuX5IXSV2KOktpt02tQM4QLhm8dI=");
        byte[] key = MAPPER.treeToValue(treeNode, byte[].class);
        EncryptionKeyWrapMetadata metadata = new EncryptionKeyWrapMetadata("TEST_KEY_RESOLVER", "key1", "tempmetadata1", "RSA-OAEP");
        return new CosmosClientEncryptionKeyProperties("key1", CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName(), key, metadata);
    }
}
Fixed
/**
 * Exercises InvalidKeyException handling while building the protected data encryption key.
 * First stubbing throws once then returns a real key and initialization is expected to
 * complete; the second stubbing throws three consecutive times and
 * initializeEncryptionSettingsAsync is expected to fail with InvalidKeyException as the cause.
 * NOTE(review): the exact retry count is inferred from the stubbing sequence — confirm
 * against EncryptionProcessor's retry logic.
 */
public void invalidClientEncryptionKeyException() throws Exception {
    CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class);
    CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class);
    Mockito.when(cosmosEncryptionAsyncClient.getEncryptionKeyWrapProvider()).thenReturn(keyStoreProvider);
    // Container properties carry a client-encryption policy (see generateContainerWithCosmosEncryptionPolicy).
    Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient,
        Mockito.any(CosmosAsyncContainer.class),
        Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy()));
    CosmosClientEncryptionKeyProperties keyProperties = generateClientEncryptionKeyProperties();
    Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient,
        Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class),
        Mockito.anyBoolean(), Mockito.nullable(String.class),
        Mockito.anyBoolean())).thenReturn(Mono.just(keyProperties));
    EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient);
    EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings();
    EncryptionSettings mockEncryptionSettings = Mockito.mock(EncryptionSettings.class);
    ReflectionUtils.setEncryptionSettings(encryptionProcessor, mockEncryptionSettings);
    // Stub 1: fail once, then return a real key built from the un-mocked settings.
    Mockito.when(mockEncryptionSettings.buildProtectedDataEncryptionKey(Mockito.any(CosmosClientEncryptionKeyProperties.class),
        Mockito.any(EncryptionKeyStoreProvider.class), Mockito.anyString())).
        thenThrow(new InvalidKeyException()).thenReturn(encryptionSettings.buildProtectedDataEncryptionKey(keyProperties,
        encryptionKeyWrapProviderAccessor.getEncryptionKeyStoreProviderImpl(keyStoreProvider),
        keyProperties.getId()));
    Mockito.doNothing().when(mockEncryptionSettings).setEncryptionSettingForProperty(Mockito.anyString(),
        Mockito.any(EncryptionSettings.class), Mockito.any(Instant.class));
    Assertions.assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse();
    // Expected to succeed despite the single InvalidKeyException.
    encryptionProcessor.initializeEncryptionSettingsAsync(false).block();
    // Fresh processor; this time the key build fails three times in a row.
    encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient);
    encryptionSettings = encryptionProcessor.getEncryptionSettings();
    mockEncryptionSettings = Mockito.mock(EncryptionSettings.class);
    ReflectionUtils.setEncryptionSettings(encryptionProcessor, mockEncryptionSettings);
    Mockito.when(mockEncryptionSettings.buildProtectedDataEncryptionKey(Mockito.any(CosmosClientEncryptionKeyProperties.class),
        Mockito.any(EncryptionKeyStoreProvider.class), Mockito.anyString())).
        thenThrow(new InvalidKeyException(), new InvalidKeyException(), new InvalidKeyException()).thenReturn(encryptionSettings.buildProtectedDataEncryptionKey(keyProperties,
        encryptionKeyWrapProviderAccessor.getEncryptionKeyStoreProviderImpl(keyStoreProvider),
        keyProperties.getId()));
    try {
        encryptionProcessor.initializeEncryptionSettingsAsync(false).block();
        fail("Expecting initializeEncryptionSettingsAsync to throw InvalidKeyException");
    } catch (Exception ex) {
        // The reactive pipeline wraps the failure; the cause must be the InvalidKeyException.
        InvalidKeyException invalidKeyException = Utils.as(ex.getCause(), InvalidKeyException.class);
        assertThat(invalidKeyException).isNotNull();
    }
}
public void invalidClientEncryptionKeyException() throws Exception { CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class); Mockito.when(cosmosEncryptionAsyncClient.getKeyEncryptionKeyResolver()).thenReturn(keyEncryptionKeyResolver); Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy())); CosmosClientEncryptionKeyProperties keyProperties = generateClientEncryptionKeyProperties(); Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(), Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(keyProperties)); EncryptionKeyStoreProviderImpl encryptionKeyStoreProvider = new EncryptionKeyStoreProviderImpl(keyEncryptionKeyResolver, "TEST_KEY_RESOLVER"); Mockito.when(cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient)).thenReturn(encryptionKeyStoreProvider); EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings(); EncryptionSettings mockEncryptionSettings = Mockito.mock(EncryptionSettings.class); ReflectionUtils.setEncryptionSettings(encryptionProcessor, mockEncryptionSettings); Mockito.when(mockEncryptionSettings.buildProtectedDataEncryptionKey(Mockito.any(CosmosClientEncryptionKeyProperties.class), Mockito.any(EncryptionKeyStoreProvider.class), Mockito.anyString())). 
thenThrow(new InvalidKeyException()).thenReturn(encryptionSettings.buildProtectedDataEncryptionKey(keyProperties, cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient), keyProperties.getId())); Mockito.doNothing().when(mockEncryptionSettings).setEncryptionSettingForProperty(Mockito.anyString(), Mockito.any(EncryptionSettings.class), Mockito.any(Instant.class)); Assertions.assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse(); encryptionProcessor.initializeEncryptionSettingsAsync(false).block(); encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); encryptionSettings = encryptionProcessor.getEncryptionSettings(); mockEncryptionSettings = Mockito.mock(EncryptionSettings.class); ReflectionUtils.setEncryptionSettings(encryptionProcessor, mockEncryptionSettings); Mockito.when(mockEncryptionSettings.buildProtectedDataEncryptionKey(Mockito.any(CosmosClientEncryptionKeyProperties.class), Mockito.any(EncryptionKeyStoreProvider.class), Mockito.anyString())). thenThrow(new InvalidKeyException(), new InvalidKeyException(), new InvalidKeyException()).thenReturn(encryptionSettings.buildProtectedDataEncryptionKey(keyProperties, cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient), keyProperties.getId())); try { encryptionProcessor.initializeEncryptionSettingsAsync(false).block(); fail("Expecting initializeEncryptionSettingsAsync to throw InvalidKeyException"); } catch (Exception ex) { InvalidKeyException invalidKeyException = Utils.as(ex.getCause(), InvalidKeyException.class); assertThat(invalidKeyException).isNotNull(); } }
class EncryptionProcessorAndSettingsTest { private static final int TIMEOUT = 6000_000; private static final ObjectMapper MAPPER = new ObjectMapper(); private static final EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider keyStoreProvider = new EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider(); private final static EncryptionImplementationBridgeHelpers.EncryptionKeyWrapProviderHelper.EncryptionKeyWrapProviderAccessor encryptionKeyWrapProviderAccessor = EncryptionImplementationBridgeHelpers.EncryptionKeyWrapProviderHelper.getEncryptionKeyWrapProviderAccessor(); private final static ImplementationBridgeHelpers.CosmosContainerPropertiesHelper.CosmosContainerPropertiesAccessor cosmosContainerPropertiesAccessor = ImplementationBridgeHelpers.CosmosContainerPropertiesHelper.getCosmosContainerPropertiesAccessor(); private final static EncryptionImplementationBridgeHelpers.CosmosEncryptionAsyncClientHelper.CosmosEncryptionAsyncClientAccessor cosmosEncryptionAsyncClientAccessor = EncryptionImplementationBridgeHelpers.CosmosEncryptionAsyncClientHelper.getCosmosEncryptionAsyncClientAccessor(); @Test(groups = {"unit"}, timeOut = TIMEOUT) public void initializeEncryptionSettingsAsync() throws Exception { CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class); Mockito.when(cosmosEncryptionAsyncClient.getEncryptionKeyWrapProvider()).thenReturn(keyStoreProvider); Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy())); Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(), 
Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties())); EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings(); try { encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block(); fail("encryptionSettings should be empty"); } catch (NullPointerException ex) { } Assertions.assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse(); encryptionProcessor.initializeEncryptionSettingsAsync(false).block(); encryptionSettings = encryptionProcessor.getEncryptionSettings(); CachedEncryptionSettings cachedEncryptionSettings = encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block(); assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor)).isTrue(); assertThat(cachedEncryptionSettings).isNotNull(); assertThat(cachedEncryptionSettings.getEncryptionSettings().getClientEncryptionKeyId()).isEqualTo("key1"); } @Test(groups = {"unit"}, timeOut = TIMEOUT) public void withoutInitializeEncryptionSettingsAsync() throws Exception { CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class); Mockito.when(cosmosEncryptionAsyncClient.getEncryptionKeyWrapProvider()).thenReturn(keyStoreProvider); Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy())); Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.anyString(), Mockito.anyString(), 
Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(), Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties())); EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings(); encryptionSettings.setDatabaseRid("TestDb"); try { encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block(); fail("encryptionSettings should be empty"); } catch (NullPointerException ex) { } EncryptionSettings spyEncryptionSettings = Mockito.spy(encryptionSettings); EncryptionSettings cachedEncryptionSettings = spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse(); assertThat(cachedEncryptionSettings).isNotNull(); assertThat(cachedEncryptionSettings.getClientEncryptionKeyId()).isEqualTo("key1"); Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); } @Test(groups = {"unit"}, timeOut = TIMEOUT) public void encryptionSettingCachedTimeToLive() throws Exception { CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class); Mockito.when(cosmosEncryptionAsyncClient.getEncryptionKeyWrapProvider()).thenReturn(keyStoreProvider); 
Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy())); Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(), Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties())); EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings(); encryptionSettings.setDatabaseRid("TestDb"); EncryptionSettings spyEncryptionSettings = Mockito.spy(encryptionSettings); EncryptionSettings cachedEncryptionSettings = spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); assertThat(cachedEncryptionSettings).isNotNull(); assertThat(cachedEncryptionSettings.getClientEncryptionKeyId()).isEqualTo("key1"); assertThat(cachedEncryptionSettings.getEncryptionSettingTimeToLive()).isAfter(Instant.now().plus(Duration.ofMinutes(59))); assertThat(cachedEncryptionSettings.getEncryptionSettingTimeToLive()).isBefore(Instant.now().plus(Duration.ofMinutes(61))); spyEncryptionSettings.setEncryptionSettingForProperty("sensitiveString", cachedEncryptionSettings, Instant.now()); spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); Mockito.verify(spyEncryptionSettings, Mockito.times(2)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); } @Test(groups = {"unit"}, timeOut 
= TIMEOUT) private ClientEncryptionPolicy generateClientEncryptionPolicy() { ClientEncryptionIncludedPath includedPath1 = new ClientEncryptionIncludedPath(); includedPath1.setClientEncryptionKeyId("key1"); includedPath1.setPath("/sensitiveString"); includedPath1.setEncryptionType(CosmosEncryptionType.DETERMINISTIC.getName()); includedPath1.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName()); List<ClientEncryptionIncludedPath> paths = new ArrayList<>(); paths.add(includedPath1); return new ClientEncryptionPolicy(paths); } private CosmosContainerProperties generateContainerWithCosmosEncryptionPolicy() { CosmosContainerProperties containerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), "/mypk"); cosmosContainerPropertiesAccessor.setSelfLink(containerProperties, "dbs/testDb/colls/testCol"); ClientEncryptionIncludedPath includedPath1 = new ClientEncryptionIncludedPath(); includedPath1.setClientEncryptionKeyId("key1"); includedPath1.setPath("/sensitiveString"); includedPath1.setEncryptionType(CosmosEncryptionType.DETERMINISTIC.getName()); includedPath1.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName()); List<ClientEncryptionIncludedPath> paths = new ArrayList<>(); paths.add(includedPath1); return containerProperties.setClientEncryptionPolicy(new ClientEncryptionPolicy(paths)); } private CosmosClientEncryptionKeyProperties generateClientEncryptionKeyProperties() throws JsonProcessingException { TextNode treeNode = new TextNode("S84PieiyZNyHxeuUuX5IXSV2KOktpt02tQM4QLhm8dI="); byte[] key = MAPPER.treeToValue(treeNode, byte[].class); EncryptionKeyWrapMetadata metadata = new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key1", "tempmetadata1"); return new CosmosClientEncryptionKeyProperties("key1", CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName(), key, metadata); } }
class EncryptionProcessorAndSettingsTest { private static final int TIMEOUT = 6000_000; private static final ObjectMapper MAPPER = new ObjectMapper(); private static final EncryptionAsyncApiCrudTest.TestKeyEncryptionKeyResolver keyEncryptionKeyResolver = new EncryptionAsyncApiCrudTest.TestKeyEncryptionKeyResolver(); private final static ImplementationBridgeHelpers.CosmosContainerPropertiesHelper.CosmosContainerPropertiesAccessor cosmosContainerPropertiesAccessor = ImplementationBridgeHelpers.CosmosContainerPropertiesHelper.getCosmosContainerPropertiesAccessor(); private final static EncryptionImplementationBridgeHelpers.CosmosEncryptionAsyncClientHelper.CosmosEncryptionAsyncClientAccessor cosmosEncryptionAsyncClientAccessor = EncryptionImplementationBridgeHelpers.CosmosEncryptionAsyncClientHelper.getCosmosEncryptionAsyncClientAccessor(); @Test(groups = {"unit"}, timeOut = TIMEOUT) public void initializeEncryptionSettingsAsync() throws Exception { CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class); Mockito.when(cosmosEncryptionAsyncClient.getKeyEncryptionKeyResolver()).thenReturn(keyEncryptionKeyResolver); Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy())); Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(), Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties())); EncryptionKeyStoreProviderImpl encryptionKeyStoreProvider = new EncryptionKeyStoreProviderImpl(keyEncryptionKeyResolver, "TEST_KEY_RESOLVER"); 
Mockito.when(cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient)).thenReturn(encryptionKeyStoreProvider); EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings(); try { encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block(); fail("encryptionSettings should be empty"); } catch (NullPointerException ex) { } Assertions.assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse(); encryptionProcessor.initializeEncryptionSettingsAsync(false).block(); encryptionSettings = encryptionProcessor.getEncryptionSettings(); CachedEncryptionSettings cachedEncryptionSettings = encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block(); assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor)).isTrue(); assertThat(cachedEncryptionSettings).isNotNull(); assertThat(cachedEncryptionSettings.getEncryptionSettings().getClientEncryptionKeyId()).isEqualTo("key1"); } @Test(groups = {"unit"}, timeOut = TIMEOUT) public void withoutInitializeEncryptionSettingsAsync() throws Exception { CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class); Mockito.when(cosmosEncryptionAsyncClient.getKeyEncryptionKeyResolver()).thenReturn(keyEncryptionKeyResolver); Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy())); Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.anyString(), 
Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(), Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties())); EncryptionKeyStoreProviderImpl encryptionKeyStoreProvider = new EncryptionKeyStoreProviderImpl(keyEncryptionKeyResolver, "TEST_KEY_RESOLVER"); Mockito.when(cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient)).thenReturn(encryptionKeyStoreProvider); EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings(); encryptionSettings.setDatabaseRid("TestDb"); try { encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block(); fail("encryptionSettings should be empty"); } catch (NullPointerException ex) { } EncryptionSettings spyEncryptionSettings = Mockito.spy(encryptionSettings); EncryptionSettings cachedEncryptionSettings = spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse(); assertThat(cachedEncryptionSettings).isNotNull(); assertThat(cachedEncryptionSettings.getClientEncryptionKeyId()).isEqualTo("key1"); Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); } @Test(groups = {"unit"}, timeOut = TIMEOUT) public void encryptionSettingCachedTimeToLive() throws Exception { CosmosAsyncContainer cosmosAsyncContainer = 
Mockito.mock(CosmosAsyncContainer.class); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class); Mockito.when(cosmosEncryptionAsyncClient.getKeyEncryptionKeyResolver()).thenReturn(keyEncryptionKeyResolver); Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy())); Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(), Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties())); EncryptionKeyStoreProviderImpl encryptionKeyStoreProvider = new EncryptionKeyStoreProviderImpl(keyEncryptionKeyResolver, "TEST_KEY_RESOLVER"); Mockito.when(cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient)).thenReturn(encryptionKeyStoreProvider); EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings(); encryptionSettings.setDatabaseRid("TestDb"); EncryptionSettings spyEncryptionSettings = Mockito.spy(encryptionSettings); EncryptionSettings cachedEncryptionSettings = spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); assertThat(cachedEncryptionSettings).isNotNull(); assertThat(cachedEncryptionSettings.getClientEncryptionKeyId()).isEqualTo("key1"); 
assertThat(cachedEncryptionSettings.getEncryptionSettingTimeToLive()).isAfter(Instant.now().plus(Duration.ofMinutes(59))); assertThat(cachedEncryptionSettings.getEncryptionSettingTimeToLive()).isBefore(Instant.now().plus(Duration.ofMinutes(61))); spyEncryptionSettings.setEncryptionSettingForProperty("sensitiveString", cachedEncryptionSettings, Instant.now().minus(Duration.ofSeconds(5))); spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); Mockito.verify(spyEncryptionSettings, Mockito.times(2)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); } @Test(groups = {"unit"}, timeOut = TIMEOUT) private ClientEncryptionPolicy generateClientEncryptionPolicy() { ClientEncryptionIncludedPath includedPath1 = new ClientEncryptionIncludedPath(); includedPath1.setClientEncryptionKeyId("key1"); includedPath1.setPath("/sensitiveString"); includedPath1.setEncryptionType(CosmosEncryptionType.DETERMINISTIC.getName()); includedPath1.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName()); List<ClientEncryptionIncludedPath> paths = new ArrayList<>(); paths.add(includedPath1); return new ClientEncryptionPolicy(paths); } private CosmosContainerProperties generateContainerWithCosmosEncryptionPolicy() { CosmosContainerProperties containerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), "/mypk"); cosmosContainerPropertiesAccessor.setSelfLink(containerProperties, "dbs/testDb/colls/testCol"); ClientEncryptionIncludedPath includedPath1 = new ClientEncryptionIncludedPath(); includedPath1.setClientEncryptionKeyId("key1"); includedPath1.setPath("/sensitiveString"); includedPath1.setEncryptionType(CosmosEncryptionType.DETERMINISTIC.getName()); includedPath1.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName()); List<ClientEncryptionIncludedPath> paths = new ArrayList<>(); paths.add(includedPath1); return 
containerProperties.setClientEncryptionPolicy(new ClientEncryptionPolicy(paths)); } private CosmosClientEncryptionKeyProperties generateClientEncryptionKeyProperties() throws JsonProcessingException { TextNode treeNode = new TextNode("S84PieiyZNyHxeuUuX5IXSV2KOktpt02tQM4QLhm8dI="); byte[] key = MAPPER.treeToValue(treeNode, byte[].class); EncryptionKeyWrapMetadata metadata = new EncryptionKeyWrapMetadata("TEST_KEY_RESOLVER", "key1", "tempmetadata1", "RSA-OAEP"); return new CosmosClientEncryptionKeyProperties("key1", CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName(), key, metadata); } }
You are returning a primitive (lowercase-`l`) `long` here — but your comment says it returns null if this is a virtual directory?
public long size() { return this.internalAttributes.size(); }
}
public long size() { return this.internalAttributes.size(); }
class AzureBasicFileAttributes implements BasicFileAttributes { private final ClientLogger logger = new ClientLogger(AzureBasicFileAttributes.class); static final Set<String> ATTRIBUTE_STRINGS; static { Set<String> set = new HashSet<>(); set.add("lastModifiedTime"); set.add("isRegularFile"); set.add("isDirectory"); set.add("isVirtualDirectory"); set.add("isSymbolicLink"); set.add("isOther"); set.add("size"); set.add("creationTime"); ATTRIBUTE_STRINGS = Collections.unmodifiableSet(set); } private final AzureBlobFileAttributes internalAttributes; /* In order to support Files.exist() and other methods like Files.walkFileTree() which depend on it, we have had to add support for virtual directories. This is not ideal as customers will have to now perform null checks when inspecting attributes (or at least check if it is a virtual directory before inspecting properties). It also incurs extra network requests as we have to call a checkDirectoryExists() after receiving the initial 404. This is two additional network requests, though they only happen in the case when a file doesn't exist or is virtual, so it shouldn't happen in the majority of api calls. */ AzureBasicFileAttributes(Path path) throws IOException { this.internalAttributes = new AzureBlobFileAttributes(path); } /** * Returns the time of last modification or null if this is a virtual directory. * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastModifiedTime() { return this.internalAttributes.lastModifiedTime(); } /** * Returns the time of last modification or null if this is a virtual directory * <p> * Last access time is not supported by the blob service. In this case, it is typical for implementations to return * the {@link * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastAccessTime() { return this.internalAttributes.lastAccessTime(); } /** * Returns the creation time. 
The creation time is the time that the file was created. Returns null if this is a * virtual directory. * * @return The creation time or null if this is a virtual directory */ @Override public FileTime creationTime() { return this.internalAttributes.creationTime(); } /** * Tells whether the file is a regular file with opaque content. * * @return whether the file is a regular file. */ @Override public boolean isRegularFile() { return this.internalAttributes.isRegularFile(); } /** * Tells whether the file is a directory. * <p> * Will only return true if the directory is a concrete directory. See * {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a directory */ @Override public boolean isDirectory() { return this.internalAttributes.isDirectory(); } /** * Tells whether the file is a virtual directory. * <p> * See {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a virtual directory */ public boolean isVirtualDirectory() { return this.internalAttributes.isVirtualDirectory(); } /** * Tells whether the file is a symbolic link. * * @return false. Symbolic links are not supported. */ @Override public boolean isSymbolicLink() { return this.internalAttributes.isSymbolicLink(); } /** * Tells whether the file is something other than a regular file, directory, or symbolic link. * * @return false. No other object types are supported. */ @Override public boolean isOther() { return this.internalAttributes.isOther(); } /** * Returns the size of the file (in bytes) or null if this is a virtual directory. * * @return the size of the file or null if this is a virtual directory */ @Override /** * Returns the url of the resource. * * @return The file key, which is the url. */ @Override public Object fileKey() { return this.internalAttributes.fileKey(); } }
class AzureBasicFileAttributes implements BasicFileAttributes { private final ClientLogger logger = new ClientLogger(AzureBasicFileAttributes.class); static final Set<String> ATTRIBUTE_STRINGS; static { Set<String> set = new HashSet<>(); set.add("lastModifiedTime"); set.add("isRegularFile"); set.add("isDirectory"); set.add("isVirtualDirectory"); set.add("isSymbolicLink"); set.add("isOther"); set.add("size"); set.add("creationTime"); ATTRIBUTE_STRINGS = Collections.unmodifiableSet(set); } private final AzureBlobFileAttributes internalAttributes; /* In order to support Files.exist() and other methods like Files.walkFileTree() which depend on it, we have had to add support for virtual directories. This is not ideal as customers will have to now perform null checks when inspecting attributes (or at least check if it is a virtual directory before inspecting properties). It also incurs extra network requests as we have to call a checkDirectoryExists() after receiving the initial 404. This is two additional network requests, though they only happen in the case when a file doesn't exist or is virtual, so it shouldn't happen in the majority of api calls. */ AzureBasicFileAttributes(Path path) throws IOException { this.internalAttributes = new AzureBlobFileAttributes(path); } /** * Returns the time of last modification or null if this is a virtual directory. * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastModifiedTime() { return this.internalAttributes.lastModifiedTime(); } /** * Returns the time of last modification or null if this is a virtual directory * <p> * Last access time is not supported by the blob service. In this case, it is typical for implementations to return * the {@link * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastAccessTime() { return this.internalAttributes.lastAccessTime(); } /** * Returns the creation time. 
The creation time is the time that the file was created. Returns null if this is a * virtual directory. * * @return The creation time or null if this is a virtual directory */ @Override public FileTime creationTime() { return this.internalAttributes.creationTime(); } /** * Tells whether the file is a regular file with opaque content. * * @return whether the file is a regular file. */ @Override public boolean isRegularFile() { return this.internalAttributes.isRegularFile(); } /** * Tells whether the file is a directory. * <p> * Will only return true if the directory is a concrete directory. See * {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a directory */ @Override public boolean isDirectory() { return this.internalAttributes.isDirectory(); } /** * Tells whether the file is a virtual directory. * <p> * See {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a virtual directory */ public boolean isVirtualDirectory() { return this.internalAttributes.isVirtualDirectory(); } /** * Tells whether the file is a symbolic link. * * @return false. Symbolic links are not supported. */ @Override public boolean isSymbolicLink() { return this.internalAttributes.isSymbolicLink(); } /** * Tells whether the file is something other than a regular file, directory, or symbolic link. * * @return false. No other object types are supported. */ @Override public boolean isOther() { return this.internalAttributes.isOther(); } /** * Returns the size of the file (in bytes). * * @return the size of the file */ @Override /** * Returns the url of the resource. * * @return The file key, which is the url. */ @Override public Object fileKey() { return this.internalAttributes.fileKey(); } }
Good catch! Thank you. I think in the case of regular directories we return 0, and there's probably no need to distinguish here, so I'll change this to return 0 and update the comment.
public long size() { return this.internalAttributes.size(); }
}
public long size() { return this.internalAttributes.size(); }
class AzureBasicFileAttributes implements BasicFileAttributes { private final ClientLogger logger = new ClientLogger(AzureBasicFileAttributes.class); static final Set<String> ATTRIBUTE_STRINGS; static { Set<String> set = new HashSet<>(); set.add("lastModifiedTime"); set.add("isRegularFile"); set.add("isDirectory"); set.add("isVirtualDirectory"); set.add("isSymbolicLink"); set.add("isOther"); set.add("size"); set.add("creationTime"); ATTRIBUTE_STRINGS = Collections.unmodifiableSet(set); } private final AzureBlobFileAttributes internalAttributes; /* In order to support Files.exist() and other methods like Files.walkFileTree() which depend on it, we have had to add support for virtual directories. This is not ideal as customers will have to now perform null checks when inspecting attributes (or at least check if it is a virtual directory before inspecting properties). It also incurs extra network requests as we have to call a checkDirectoryExists() after receiving the initial 404. This is two additional network requests, though they only happen in the case when a file doesn't exist or is virtual, so it shouldn't happen in the majority of api calls. */ AzureBasicFileAttributes(Path path) throws IOException { this.internalAttributes = new AzureBlobFileAttributes(path); } /** * Returns the time of last modification or null if this is a virtual directory. * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastModifiedTime() { return this.internalAttributes.lastModifiedTime(); } /** * Returns the time of last modification or null if this is a virtual directory * <p> * Last access time is not supported by the blob service. In this case, it is typical for implementations to return * the {@link * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastAccessTime() { return this.internalAttributes.lastAccessTime(); } /** * Returns the creation time. 
The creation time is the time that the file was created. Returns null if this is a * virtual directory. * * @return The creation time or null if this is a virtual directory */ @Override public FileTime creationTime() { return this.internalAttributes.creationTime(); } /** * Tells whether the file is a regular file with opaque content. * * @return whether the file is a regular file. */ @Override public boolean isRegularFile() { return this.internalAttributes.isRegularFile(); } /** * Tells whether the file is a directory. * <p> * Will only return true if the directory is a concrete directory. See * {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a directory */ @Override public boolean isDirectory() { return this.internalAttributes.isDirectory(); } /** * Tells whether the file is a virtual directory. * <p> * See {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a virtual directory */ public boolean isVirtualDirectory() { return this.internalAttributes.isVirtualDirectory(); } /** * Tells whether the file is a symbolic link. * * @return false. Symbolic links are not supported. */ @Override public boolean isSymbolicLink() { return this.internalAttributes.isSymbolicLink(); } /** * Tells whether the file is something other than a regular file, directory, or symbolic link. * * @return false. No other object types are supported. */ @Override public boolean isOther() { return this.internalAttributes.isOther(); } /** * Returns the size of the file (in bytes) or null if this is a virtual directory. * * @return the size of the file or null if this is a virtual directory */ @Override /** * Returns the url of the resource. * * @return The file key, which is the url. */ @Override public Object fileKey() { return this.internalAttributes.fileKey(); } }
class AzureBasicFileAttributes implements BasicFileAttributes { private final ClientLogger logger = new ClientLogger(AzureBasicFileAttributes.class); static final Set<String> ATTRIBUTE_STRINGS; static { Set<String> set = new HashSet<>(); set.add("lastModifiedTime"); set.add("isRegularFile"); set.add("isDirectory"); set.add("isVirtualDirectory"); set.add("isSymbolicLink"); set.add("isOther"); set.add("size"); set.add("creationTime"); ATTRIBUTE_STRINGS = Collections.unmodifiableSet(set); } private final AzureBlobFileAttributes internalAttributes; /* In order to support Files.exist() and other methods like Files.walkFileTree() which depend on it, we have had to add support for virtual directories. This is not ideal as customers will have to now perform null checks when inspecting attributes (or at least check if it is a virtual directory before inspecting properties). It also incurs extra network requests as we have to call a checkDirectoryExists() after receiving the initial 404. This is two additional network requests, though they only happen in the case when a file doesn't exist or is virtual, so it shouldn't happen in the majority of api calls. */ AzureBasicFileAttributes(Path path) throws IOException { this.internalAttributes = new AzureBlobFileAttributes(path); } /** * Returns the time of last modification or null if this is a virtual directory. * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastModifiedTime() { return this.internalAttributes.lastModifiedTime(); } /** * Returns the time of last modification or null if this is a virtual directory * <p> * Last access time is not supported by the blob service. In this case, it is typical for implementations to return * the {@link * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastAccessTime() { return this.internalAttributes.lastAccessTime(); } /** * Returns the creation time. 
The creation time is the time that the file was created. Returns null if this is a * virtual directory. * * @return The creation time or null if this is a virtual directory */ @Override public FileTime creationTime() { return this.internalAttributes.creationTime(); } /** * Tells whether the file is a regular file with opaque content. * * @return whether the file is a regular file. */ @Override public boolean isRegularFile() { return this.internalAttributes.isRegularFile(); } /** * Tells whether the file is a directory. * <p> * Will only return true if the directory is a concrete directory. See * {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a directory */ @Override public boolean isDirectory() { return this.internalAttributes.isDirectory(); } /** * Tells whether the file is a virtual directory. * <p> * See {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a virtual directory */ public boolean isVirtualDirectory() { return this.internalAttributes.isVirtualDirectory(); } /** * Tells whether the file is a symbolic link. * * @return false. Symbolic links are not supported. */ @Override public boolean isSymbolicLink() { return this.internalAttributes.isSymbolicLink(); } /** * Tells whether the file is something other than a regular file, directory, or symbolic link. * * @return false. No other object types are supported. */ @Override public boolean isOther() { return this.internalAttributes.isOther(); } /** * Returns the size of the file (in bytes). * * @return the size of the file */ @Override /** * Returns the url of the resource. * * @return The file key, which is the url. */ @Override public Object fileKey() { return this.internalAttributes.fileKey(); } }
As a minor stylistic comment (and as such, I'm not expecting anything from saying this), one pattern I've favoured over the years is to reverse this code approach, to reduce indentation and 'fall out' of the method early, rather than late. In this case, I would have considered doing it as such: ```java public long size() { if (this.isVirtualDirectory) { return 0; } return properties.getBlobSize(); } ``` In some projects, we even had a code style expectation that such simple return statements didn't need to have braces, so we would have made it simpler and on one line, as below: ```java public long size() { if (this.isVirtualDirectory) return 0; return properties.getBlobSize(); } ``` Of course, at this point we might as well go the whole way and just do a single-line ternary expression: ```java public long size() { return this.isVirtualDirectory ? 0 : properties.getBlobSize(); } ```
/**
 * Returns the size of the file (in bytes).
 * <p>
 * A virtual directory has no backing blob, so its size is reported as 0, consistent with how
 * concrete directories are handled.
 *
 * @return the size of the file, or 0 if this is a virtual directory
 */
public long size() {
    // Positive-condition ternary reads more directly than the negated if/else it replaces.
    return this.isVirtualDirectory ? 0 : properties.getBlobSize();
}
}
/**
 * Returns the size of the file (in bytes).
 * <p>
 * A virtual directory has no backing blob, so its size is reported as 0.
 *
 * @return the size of the file, or 0 if this is a virtual directory
 */
public long size() {
    // Guard clause: fall out early for the virtual-directory case.
    if (this.isVirtualDirectory) {
        return 0;
    }
    return properties.getBlobSize();
}
class AzureBlobFileAttributes implements BasicFileAttributes { /* Some blob properties do not have getters as they do not make sense in the context of nio. These properties are: - incremental snapshot related properties (only for page blobs) - lease related properties (leases not currently supported) - sequence number (only for page blobs) - encryption key sha256 (cpk not supported) - committed block count (only for append blobs) */ private final ClientLogger logger = new ClientLogger(AzureBlobFileAttributes.class); private final BlobProperties properties; private final AzureResource resource; private final boolean isVirtualDirectory; AzureBlobFileAttributes(Path path) throws IOException { this.resource = new AzureResource(path); BlobProperties props = null; try { props = resource.getBlobClient().getProperties(); } catch (BlobStorageException e) { if (e.getStatusCode() == 404 && this.resource.checkDirectoryExists()) { this.isVirtualDirectory = true; this.properties = null; return; } else { throw LoggingUtility.logError(logger, new IOException("Path: " + path.toString(), e)); } } this.properties = props; this.isVirtualDirectory = false; } static Map<String, Supplier<Object>> getAttributeSuppliers(AzureBlobFileAttributes attributes) { Map<String, Supplier<Object>> map = new HashMap<>(); map.put("creationTime", attributes::creationTime); map.put("lastModifiedTime", attributes::lastModifiedTime); map.put("eTag", attributes::eTag); map.put("blobHttpHeaders", attributes::blobHttpHeaders); map.put("blobType", attributes::blobType); map.put("copyId", attributes::copyId); map.put("copyStatus", attributes::copyStatus); map.put("copySource", attributes::copySource); map.put("copyProgress", attributes::copyProgress); map.put("copyCompletionTime", attributes::copyCompletionTime); map.put("copyStatusDescription", attributes::copyStatusDescription); map.put("isServerEncrypted", attributes::isServerEncrypted); map.put("accessTier", attributes::accessTier); 
map.put("isAccessTierInferred", attributes::isAccessTierInferred); map.put("archiveStatus", attributes::archiveStatus); map.put("accessTierChangeTime", attributes::accessTierChangeTime); map.put("metadata", attributes::metadata); map.put("isRegularFile", attributes::isRegularFile); map.put("isDirectory", attributes::isDirectory); map.put("isVirtualDirectory", attributes::isVirtualDirectory); map.put("isSymbolicLink", attributes::isSymbolicLink); map.put("isOther", attributes::isOther); map.put("size", attributes::size); return map; } /** * Returns the creation time. The creation time is the time that the file was created. Returns null if this is a * virtual directory. * * @return The creation time or */ @Override public FileTime creationTime() { if (!this.isVirtualDirectory) { return FileTime.from(this.properties.getCreationTime().toInstant()); } else { return null; } } /** * Returns the time of last modification. Returns null if this is a virtual directory * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastModifiedTime() { if (!this.isVirtualDirectory) { return FileTime.from(this.properties.getLastModified().toInstant()); } else { return null; } } /** * Returns the eTag of the blob or null if this is a virtual directory * * @return the eTag of the blob or null if this is a virtual directory */ public String eTag() { if (!this.isVirtualDirectory) { return this.properties.getETag(); } else { return null; } } /** * Returns the {@link BlobHttpHeaders} of the blob or null if this is a virtual directory. * * @return {@link BlobHttpHeaders} or null if this is a virtual directory */ public BlobHttpHeaders blobHttpHeaders() { if (!this.isVirtualDirectory) { /* We return these all as one value so it's consistent with the way of setting, especially the setAttribute method that accepts a string argument for the name of the property. 
Returning them individually would mean we have to support setting them individually as well, which is not possible due to service constraints. */ return new BlobHttpHeaders() .setContentType(this.properties.getContentType()) .setContentLanguage(this.properties.getContentLanguage()) .setContentMd5(this.properties.getContentMd5()) .setContentDisposition(this.properties.getContentDisposition()) .setContentEncoding(this.properties.getContentEncoding()) .setCacheControl(this.properties.getCacheControl()); } else { return null; } } /** * Returns the type of the blob or null if this is a virtual directory * * @return the type of the blob or null if this is a virtual directory */ public BlobType blobType() { if (!this.isVirtualDirectory) { return this.properties.getBlobType(); } else { return null; } } /** * Returns the identifier of the last copy operation. If this blob hasn't been the target of a copy operation or has * been modified since this won't be set. Returns null if this is a virtual directory * * @return the identifier of the last copy operation or null if this is a virtual directory */ public String copyId() { if (!this.isVirtualDirectory) { return this.properties.getCopyId(); } else { return null; } } /** * Returns the status of the last copy operation. If this blob hasn't been the target of a copy operation or has * been modified since this won't be set. Returns null if this is a virtual directory * * @return the status of the last copy operation or null if this is a virtual directory */ public CopyStatusType copyStatus() { if (!this.isVirtualDirectory) { return this.properties.getCopyStatus(); } else { return null; } } /** * Returns the source blob URL from the last copy operation. If this blob hasn't been the target of a copy operation * or has been modified since this won't be set. 
Returns null if this is a virtual directory * * @return the source blob URL from the last copy operation or null if this is a virtual directory */ public String copySource() { if (!this.isVirtualDirectory) { return this.properties.getCopySource(); } else { return null; } } /** * Returns the number of bytes copied and total bytes in the source from the last copy operation (bytes copied/total * bytes). If this blob hasn't been the target of a copy operation or has been modified since this won't be set. * Returns null if this is a virtual directory * * @return the number of bytes copied and total bytes in the source from the last copy operation null if this is a * virtual directory */ public String copyProgress() { if (!this.isVirtualDirectory) { return this.properties.getCopyProgress(); } else { return null; } } /** * Returns the completion time of the last copy operation. If this blob hasn't been the target of a copy operation * or has been modified since this won't be set. Returns null if this is a virtual directory. * * @return the completion time of the last copy operation or null if this is a virtual directory */ public OffsetDateTime copyCompletionTime() { if (!this.isVirtualDirectory) { return this.properties.getCopyCompletionTime(); } else { return null; } } /** * Returns the description of the last copy failure, this is set when the {@link * {@link CopyStatusType * target of a copy operation or has been modified since this won't be set. Returns null if this is a virtual * directory. * * @return the description of the last copy failure or null if this is a virtual directory */ public String copyStatusDescription() { if (!this.isVirtualDirectory) { return this.properties.getCopyStatusDescription(); } else { return null; } } /** * Returns the status of the blob being encrypted on the server or null if this is a virtual directory. 
* * @return the status of the blob being encrypted on the server or null if this is a virtual directory */ public Boolean isServerEncrypted() { if (!this.isVirtualDirectory) { return this.properties.isServerEncrypted(); } else { return null; } } /** * Returns the tier of the blob. This is only set for Page blobs on a premium storage account or for Block blobs on * blob storage or general purpose V2 account. Returns null if this is a virtual directory. * * @return the tier of the blob or null if this is a virtual directory */ public AccessTier accessTier() { if (!this.isVirtualDirectory) { return this.properties.getAccessTier(); } else { return null; } } /** * Returns the status of the tier being inferred for the blob. This is only set for Page blobs on a premium storage * account or for Block blobs on blob storage or general purpose V2 account. Returns null if this is a virtual * directory. * * @return the status of the tier being inferred for the blob or null if this is a virtual directory */ public Boolean isAccessTierInferred() { if (!this.isVirtualDirectory) { return this.properties.isAccessTierInferred(); } else { return null; } } /** * Returns the archive status of the blob. This is only for blobs on a blob storage and general purpose v2 account. * Returns null if this is a virtual directory. * * @return the archive status of the blob or null if this is a virtual directory */ public ArchiveStatus archiveStatus() { if (!this.isVirtualDirectory) { return this.properties.getArchiveStatus(); } else { return null; } } /** * Returns the time when the access tier for the blob was last changed or null if this is a virtual directory. 
* * @return the time when the access tier for the blob was last changed or null if this is a virtual directory */ public OffsetDateTime accessTierChangeTime() { if (!this.isVirtualDirectory) { return this.properties.getAccessTierChangeTime(); } else { return null; } } /** * Returns the metadata associated with this blob or null if this is a virtual directory. * * @return the metadata associated with this blob or null if this is a virtual directory */ public Map<String, String> metadata() { if (!this.isVirtualDirectory) { return Collections.unmodifiableMap(this.properties.getMetadata()); } else { return null; } } /** * Returns the time of last modification or null if this is a virtual directory. * <p> * Last access time is not supported by the blob service. In this case, it is typical for implementations to return * the {@link * * @return the time of last modification null if this is a virtual directory */ @Override public FileTime lastAccessTime() { if (!this.isVirtualDirectory) { return this.lastModifiedTime(); } else { return null; } } /** * Tells whether the file is a regular file with opaque content. * * @return whether the file is a regular file. */ @Override public boolean isRegularFile() { if (!this.isVirtualDirectory) { return !this.properties.getMetadata().getOrDefault(AzureResource.DIR_METADATA_MARKER, "false").equals("true"); } else { return false; } } /** * Tells whether the file is a directory. * <p> * Will return true if the directory is a concrete or virtual directory. See * {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a directory */ @Override public boolean isDirectory() { return !this.isRegularFile(); } /** * Tells whether the file is a virtual directory. * <p> * See {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a virtual directory */ public boolean isVirtualDirectory() { return this.isVirtualDirectory; } /** * Tells whether the file is a symbolic link. 
* * @return false. Symbolic links are not supported. */ @Override public boolean isSymbolicLink() { return false; } /** * Tells whether the file is something other than a regular file, directory, or symbolic link. * * @return false. No other object types are supported. */ @Override public boolean isOther() { return false; } /** * Returns the size of the file (in bytes). * * @return the size of the file */ @Override /** * Returns the url of the resource. * * @return The file key, which is the url. */ @Override public Object fileKey() { return resource.getBlobClient().getBlobUrl(); } }
class AzureBlobFileAttributes implements BasicFileAttributes { /* Some blob properties do not have getters as they do not make sense in the context of nio. These properties are: - incremental snapshot related properties (only for page blobs) - lease related properties (leases not currently supported) - sequence number (only for page blobs) - encryption key sha256 (cpk not supported) - committed block count (only for append blobs) */ private final ClientLogger logger = new ClientLogger(AzureBlobFileAttributes.class); private final BlobProperties properties; private final AzureResource resource; private final boolean isVirtualDirectory; AzureBlobFileAttributes(Path path) throws IOException { this.resource = new AzureResource(path); BlobProperties props = null; try { props = resource.getBlobClient().getProperties(); } catch (BlobStorageException e) { if (e.getStatusCode() == 404 && this.resource.checkVirtualDirectoryExists()) { this.isVirtualDirectory = true; this.properties = null; return; } else { throw LoggingUtility.logError(logger, new IOException("Path: " + path.toString(), e)); } } this.properties = props; this.isVirtualDirectory = false; } static Map<String, Supplier<Object>> getAttributeSuppliers(AzureBlobFileAttributes attributes) { Map<String, Supplier<Object>> map = new HashMap<>(); map.put("creationTime", attributes::creationTime); map.put("lastModifiedTime", attributes::lastModifiedTime); map.put("eTag", attributes::eTag); map.put("blobHttpHeaders", attributes::blobHttpHeaders); map.put("blobType", attributes::blobType); map.put("copyId", attributes::copyId); map.put("copyStatus", attributes::copyStatus); map.put("copySource", attributes::copySource); map.put("copyProgress", attributes::copyProgress); map.put("copyCompletionTime", attributes::copyCompletionTime); map.put("copyStatusDescription", attributes::copyStatusDescription); map.put("isServerEncrypted", attributes::isServerEncrypted); map.put("accessTier", attributes::accessTier); 
map.put("isAccessTierInferred", attributes::isAccessTierInferred); map.put("archiveStatus", attributes::archiveStatus); map.put("accessTierChangeTime", attributes::accessTierChangeTime); map.put("metadata", attributes::metadata); map.put("isRegularFile", attributes::isRegularFile); map.put("isDirectory", attributes::isDirectory); map.put("isVirtualDirectory", attributes::isVirtualDirectory); map.put("isSymbolicLink", attributes::isSymbolicLink); map.put("isOther", attributes::isOther); map.put("size", attributes::size); return map; } /** * Returns the creation time. The creation time is the time that the file was created. Returns null if this is a * virtual directory. * * @return The creation time or null if this is a virtual directory */ @Override public FileTime creationTime() { return !this.isVirtualDirectory ? FileTime.from(this.properties.getCreationTime().toInstant()) : null; } /** * Returns the time of last modification. Returns null if this is a virtual directory * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastModifiedTime() { return !this.isVirtualDirectory ? FileTime.from(this.properties.getLastModified().toInstant()) : null; } /** * Returns the eTag of the blob or null if this is a virtual directory * * @return the eTag of the blob or null if this is a virtual directory */ public String eTag() { return !this.isVirtualDirectory ? this.properties.getETag() : null; } /** * Returns the {@link BlobHttpHeaders} of the blob or null if this is a virtual directory. * * @return {@link BlobHttpHeaders} or null if this is a virtual directory */ public BlobHttpHeaders blobHttpHeaders() { if (this.isVirtualDirectory) { return null; } /* We return these all as one value, so it's consistent with the way of setting, especially the setAttribute method that accepts a string argument for the name of the property. 
Returning them individually would mean we have to support setting them individually as well, which is not possible due to service constraints. */ return new BlobHttpHeaders() .setContentType(this.properties.getContentType()) .setContentLanguage(this.properties.getContentLanguage()) .setContentMd5(this.properties.getContentMd5()) .setContentDisposition(this.properties.getContentDisposition()) .setContentEncoding(this.properties.getContentEncoding()) .setCacheControl(this.properties.getCacheControl()); } /** * Returns the type of the blob or null if this is a virtual directory * * @return the type of the blob or null if this is a virtual directory */ public BlobType blobType() { return !this.isVirtualDirectory ? this.properties.getBlobType() : null; } /** * Returns the identifier of the last copy operation. If this blob hasn't been the target of a copy operation or has * been modified since this won't be set. Returns null if this is a virtual directory * * @return the identifier of the last copy operation or null if this is a virtual directory */ public String copyId() { return !this.isVirtualDirectory ? this.properties.getCopyId() : null; } /** * Returns the status of the last copy operation. If this blob hasn't been the target of a copy operation or has * been modified since this won't be set. Returns null if this is a virtual directory * * @return the status of the last copy operation or null if this is a virtual directory */ public CopyStatusType copyStatus() { return !this.isVirtualDirectory ? this.properties.getCopyStatus() : null; } /** * Returns the source blob URL from the last copy operation. If this blob hasn't been the target of a copy operation * or has been modified since this won't be set. Returns null if this is a virtual directory * * @return the source blob URL from the last copy operation or null if this is a virtual directory */ public String copySource() { return !this.isVirtualDirectory ? 
this.properties.getCopySource() : null; } /** * Returns the number of bytes copied and total bytes in the source from the last copy operation (bytes copied/total * bytes). If this blob hasn't been the target of a copy operation or has been modified since this won't be set. * Returns null if this is a virtual directory * * @return the number of bytes copied and total bytes in the source from the last copy operation null if this is a * virtual directory */ public String copyProgress() { return !this.isVirtualDirectory ? this.properties.getCopyProgress() : null; } /** * Returns the completion time of the last copy operation. If this blob hasn't been the target of a copy operation * or has been modified since this won't be set. Returns null if this is a virtual directory. * * @return the completion time of the last copy operation or null if this is a virtual directory */ public OffsetDateTime copyCompletionTime() { return !this.isVirtualDirectory ? this.properties.getCopyCompletionTime() : null; } /** * Returns the description of the last copy failure, this is set when the {@link * {@link CopyStatusType * target of a copy operation or has been modified since this won't be set. Returns null if this is a virtual * directory. * * @return the description of the last copy failure or null if this is a virtual directory */ public String copyStatusDescription() { return !this.isVirtualDirectory ? this.properties.getCopyStatusDescription() : null; } /** * Returns the status of the blob being encrypted on the server or null if this is a virtual directory. * * @return the status of the blob being encrypted on the server or null if this is a virtual directory */ public Boolean isServerEncrypted() { return !this.isVirtualDirectory ? this.properties.isServerEncrypted() : null; } /** * Returns the tier of the blob. This is only set for Page blobs on a premium storage account or for Block blobs on * blob storage or general purpose V2 account. 
Returns null if this is a virtual directory. * * @return the tier of the blob or null if this is a virtual directory */ public AccessTier accessTier() { return !this.isVirtualDirectory ? this.properties.getAccessTier() : null; } /** * Returns the status of the tier being inferred for the blob. This is only set for Page blobs on a premium storage * account or for Block blobs on blob storage or general purpose V2 account. Returns null if this is a virtual * directory. * * @return the status of the tier being inferred for the blob or null if this is a virtual directory */ public Boolean isAccessTierInferred() { return !this.isVirtualDirectory ? this.properties.isAccessTierInferred() : null; } /** * Returns the archive status of the blob. This is only for blobs on a blob storage and general purpose v2 account. * Returns null if this is a virtual directory. * * @return the archive status of the blob or null if this is a virtual directory */ public ArchiveStatus archiveStatus() { return !this.isVirtualDirectory ? this.properties.getArchiveStatus() : null; } /** * Returns the time when the access tier for the blob was last changed or null if this is a virtual directory. * * @return the time when the access tier for the blob was last changed or null if this is a virtual directory */ public OffsetDateTime accessTierChangeTime() { return !this.isVirtualDirectory ? this.properties.getAccessTierChangeTime() : null; } /** * Returns the metadata associated with this blob or null if this is a virtual directory. * * @return the metadata associated with this blob or null if this is a virtual directory */ public Map<String, String> metadata() { return !this.isVirtualDirectory ? Collections.unmodifiableMap(this.properties.getMetadata()) : null; } /** * Returns the time of last modification or null if this is a virtual directory. * <p> * Last access time is not supported by the blob service. 
In this case, it is typical for implementations to return * the {@link * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastAccessTime() { return !this.isVirtualDirectory ? FileTime.from(this.properties.getLastAccessedTime().toInstant()) : null; } /** * Tells whether the file is a regular file with opaque content. * * @return whether the file is a regular file. */ @Override public boolean isRegularFile() { return !this.isVirtualDirectory && !this.properties.getMetadata().getOrDefault(AzureResource.DIR_METADATA_MARKER, "false").equals("true"); } /** * Tells whether the file is a directory. * <p> * Will return true if the directory is a concrete or virtual directory. See * {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a directory */ @Override public boolean isDirectory() { return !this.isRegularFile(); } /** * Tells whether the file is a virtual directory. * <p> * See {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a virtual directory */ public boolean isVirtualDirectory() { return this.isVirtualDirectory; } /** * Tells whether the file is a symbolic link. * * @return false. Symbolic links are not supported. */ @Override public boolean isSymbolicLink() { return false; } /** * Tells whether the file is something other than a regular file, directory, or symbolic link. * * @return false. No other object types are supported. */ @Override public boolean isOther() { return false; } /** * Returns the size of the file (in bytes). * * @return the size of the file */ @Override /** * Returns the url of the resource. * * @return The file key, which is the url. */ @Override public Object fileKey() { return resource.getBlobClient().getBlobUrl(); } }
Good idea. I can turn these into ternary expressions. Much cleaner.
public long size() { if (!this.isVirtualDirectory) { return properties.getBlobSize(); } else { return 0; } }
}
public long size() { return !this.isVirtualDirectory ? properties.getBlobSize() : 0; }
class AzureBlobFileAttributes implements BasicFileAttributes { /* Some blob properties do not have getters as they do not make sense in the context of nio. These properties are: - incremental snapshot related properties (only for page blobs) - lease related properties (leases not currently supported) - sequence number (only for page blobs) - encryption key sha256 (cpk not supported) - committed block count (only for append blobs) */ private final ClientLogger logger = new ClientLogger(AzureBlobFileAttributes.class); private final BlobProperties properties; private final AzureResource resource; private final boolean isVirtualDirectory; AzureBlobFileAttributes(Path path) throws IOException { this.resource = new AzureResource(path); BlobProperties props = null; try { props = resource.getBlobClient().getProperties(); } catch (BlobStorageException e) { if (e.getStatusCode() == 404 && this.resource.checkDirectoryExists()) { this.isVirtualDirectory = true; this.properties = null; return; } else { throw LoggingUtility.logError(logger, new IOException("Path: " + path.toString(), e)); } } this.properties = props; this.isVirtualDirectory = false; } static Map<String, Supplier<Object>> getAttributeSuppliers(AzureBlobFileAttributes attributes) { Map<String, Supplier<Object>> map = new HashMap<>(); map.put("creationTime", attributes::creationTime); map.put("lastModifiedTime", attributes::lastModifiedTime); map.put("eTag", attributes::eTag); map.put("blobHttpHeaders", attributes::blobHttpHeaders); map.put("blobType", attributes::blobType); map.put("copyId", attributes::copyId); map.put("copyStatus", attributes::copyStatus); map.put("copySource", attributes::copySource); map.put("copyProgress", attributes::copyProgress); map.put("copyCompletionTime", attributes::copyCompletionTime); map.put("copyStatusDescription", attributes::copyStatusDescription); map.put("isServerEncrypted", attributes::isServerEncrypted); map.put("accessTier", attributes::accessTier); 
map.put("isAccessTierInferred", attributes::isAccessTierInferred); map.put("archiveStatus", attributes::archiveStatus); map.put("accessTierChangeTime", attributes::accessTierChangeTime); map.put("metadata", attributes::metadata); map.put("isRegularFile", attributes::isRegularFile); map.put("isDirectory", attributes::isDirectory); map.put("isVirtualDirectory", attributes::isVirtualDirectory); map.put("isSymbolicLink", attributes::isSymbolicLink); map.put("isOther", attributes::isOther); map.put("size", attributes::size); return map; } /** * Returns the creation time. The creation time is the time that the file was created. Returns null if this is a * virtual directory. * * @return The creation time or */ @Override public FileTime creationTime() { if (!this.isVirtualDirectory) { return FileTime.from(this.properties.getCreationTime().toInstant()); } else { return null; } } /** * Returns the time of last modification. Returns null if this is a virtual directory * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastModifiedTime() { if (!this.isVirtualDirectory) { return FileTime.from(this.properties.getLastModified().toInstant()); } else { return null; } } /** * Returns the eTag of the blob or null if this is a virtual directory * * @return the eTag of the blob or null if this is a virtual directory */ public String eTag() { if (!this.isVirtualDirectory) { return this.properties.getETag(); } else { return null; } } /** * Returns the {@link BlobHttpHeaders} of the blob or null if this is a virtual directory. * * @return {@link BlobHttpHeaders} or null if this is a virtual directory */ public BlobHttpHeaders blobHttpHeaders() { if (!this.isVirtualDirectory) { /* We return these all as one value so it's consistent with the way of setting, especially the setAttribute method that accepts a string argument for the name of the property. 
Returning them individually would mean we have to support setting them individually as well, which is not possible due to service constraints. */ return new BlobHttpHeaders() .setContentType(this.properties.getContentType()) .setContentLanguage(this.properties.getContentLanguage()) .setContentMd5(this.properties.getContentMd5()) .setContentDisposition(this.properties.getContentDisposition()) .setContentEncoding(this.properties.getContentEncoding()) .setCacheControl(this.properties.getCacheControl()); } else { return null; } } /** * Returns the type of the blob or null if this is a virtual directory * * @return the type of the blob or null if this is a virtual directory */ public BlobType blobType() { if (!this.isVirtualDirectory) { return this.properties.getBlobType(); } else { return null; } } /** * Returns the identifier of the last copy operation. If this blob hasn't been the target of a copy operation or has * been modified since this won't be set. Returns null if this is a virtual directory * * @return the identifier of the last copy operation or null if this is a virtual directory */ public String copyId() { if (!this.isVirtualDirectory) { return this.properties.getCopyId(); } else { return null; } } /** * Returns the status of the last copy operation. If this blob hasn't been the target of a copy operation or has * been modified since this won't be set. Returns null if this is a virtual directory * * @return the status of the last copy operation or null if this is a virtual directory */ public CopyStatusType copyStatus() { if (!this.isVirtualDirectory) { return this.properties.getCopyStatus(); } else { return null; } } /** * Returns the source blob URL from the last copy operation. If this blob hasn't been the target of a copy operation * or has been modified since this won't be set. 
Returns null if this is a virtual directory * * @return the source blob URL from the last copy operation or null if this is a virtual directory */ public String copySource() { if (!this.isVirtualDirectory) { return this.properties.getCopySource(); } else { return null; } } /** * Returns the number of bytes copied and total bytes in the source from the last copy operation (bytes copied/total * bytes). If this blob hasn't been the target of a copy operation or has been modified since this won't be set. * Returns null if this is a virtual directory * * @return the number of bytes copied and total bytes in the source from the last copy operation null if this is a * virtual directory */ public String copyProgress() { if (!this.isVirtualDirectory) { return this.properties.getCopyProgress(); } else { return null; } } /** * Returns the completion time of the last copy operation. If this blob hasn't been the target of a copy operation * or has been modified since this won't be set. Returns null if this is a virtual directory. * * @return the completion time of the last copy operation or null if this is a virtual directory */ public OffsetDateTime copyCompletionTime() { if (!this.isVirtualDirectory) { return this.properties.getCopyCompletionTime(); } else { return null; } } /** * Returns the description of the last copy failure, this is set when the {@link * {@link CopyStatusType * target of a copy operation or has been modified since this won't be set. Returns null if this is a virtual * directory. * * @return the description of the last copy failure or null if this is a virtual directory */ public String copyStatusDescription() { if (!this.isVirtualDirectory) { return this.properties.getCopyStatusDescription(); } else { return null; } } /** * Returns the status of the blob being encrypted on the server or null if this is a virtual directory. 
* * @return the status of the blob being encrypted on the server or null if this is a virtual directory */ public Boolean isServerEncrypted() { if (!this.isVirtualDirectory) { return this.properties.isServerEncrypted(); } else { return null; } } /** * Returns the tier of the blob. This is only set for Page blobs on a premium storage account or for Block blobs on * blob storage or general purpose V2 account. Returns null if this is a virtual directory. * * @return the tier of the blob or null if this is a virtual directory */ public AccessTier accessTier() { if (!this.isVirtualDirectory) { return this.properties.getAccessTier(); } else { return null; } } /** * Returns the status of the tier being inferred for the blob. This is only set for Page blobs on a premium storage * account or for Block blobs on blob storage or general purpose V2 account. Returns null if this is a virtual * directory. * * @return the status of the tier being inferred for the blob or null if this is a virtual directory */ public Boolean isAccessTierInferred() { if (!this.isVirtualDirectory) { return this.properties.isAccessTierInferred(); } else { return null; } } /** * Returns the archive status of the blob. This is only for blobs on a blob storage and general purpose v2 account. * Returns null if this is a virtual directory. * * @return the archive status of the blob or null if this is a virtual directory */ public ArchiveStatus archiveStatus() { if (!this.isVirtualDirectory) { return this.properties.getArchiveStatus(); } else { return null; } } /** * Returns the time when the access tier for the blob was last changed or null if this is a virtual directory. 
* * @return the time when the access tier for the blob was last changed or null if this is a virtual directory */ public OffsetDateTime accessTierChangeTime() { if (!this.isVirtualDirectory) { return this.properties.getAccessTierChangeTime(); } else { return null; } } /** * Returns the metadata associated with this blob or null if this is a virtual directory. * * @return the metadata associated with this blob or null if this is a virtual directory */ public Map<String, String> metadata() { if (!this.isVirtualDirectory) { return Collections.unmodifiableMap(this.properties.getMetadata()); } else { return null; } } /** * Returns the time of last modification or null if this is a virtual directory. * <p> * Last access time is not supported by the blob service. In this case, it is typical for implementations to return * the {@link * * @return the time of last modification null if this is a virtual directory */ @Override public FileTime lastAccessTime() { if (!this.isVirtualDirectory) { return this.lastModifiedTime(); } else { return null; } } /** * Tells whether the file is a regular file with opaque content. * * @return whether the file is a regular file. */ @Override public boolean isRegularFile() { if (!this.isVirtualDirectory) { return !this.properties.getMetadata().getOrDefault(AzureResource.DIR_METADATA_MARKER, "false").equals("true"); } else { return false; } } /** * Tells whether the file is a directory. * <p> * Will return true if the directory is a concrete or virtual directory. See * {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a directory */ @Override public boolean isDirectory() { return !this.isRegularFile(); } /** * Tells whether the file is a virtual directory. * <p> * See {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a virtual directory */ public boolean isVirtualDirectory() { return this.isVirtualDirectory; } /** * Tells whether the file is a symbolic link. 
* * @return false. Symbolic links are not supported. */ @Override public boolean isSymbolicLink() { return false; } /** * Tells whether the file is something other than a regular file, directory, or symbolic link. * * @return false. No other object types are supported. */ @Override public boolean isOther() { return false; } /** * Returns the size of the file (in bytes). * * @return the size of the file */ @Override /** * Returns the url of the resource. * * @return The file key, which is the url. */ @Override public Object fileKey() { return resource.getBlobClient().getBlobUrl(); } }
class AzureBlobFileAttributes implements BasicFileAttributes { /* Some blob properties do not have getters as they do not make sense in the context of nio. These properties are: - incremental snapshot related properties (only for page blobs) - lease related properties (leases not currently supported) - sequence number (only for page blobs) - encryption key sha256 (cpk not supported) - committed block count (only for append blobs) */ private final ClientLogger logger = new ClientLogger(AzureBlobFileAttributes.class); private final BlobProperties properties; private final AzureResource resource; private final boolean isVirtualDirectory; AzureBlobFileAttributes(Path path) throws IOException { this.resource = new AzureResource(path); BlobProperties props = null; try { props = resource.getBlobClient().getProperties(); } catch (BlobStorageException e) { if (e.getStatusCode() == 404 && this.resource.checkVirtualDirectoryExists()) { this.isVirtualDirectory = true; this.properties = null; return; } else { throw LoggingUtility.logError(logger, new IOException("Path: " + path.toString(), e)); } } this.properties = props; this.isVirtualDirectory = false; } static Map<String, Supplier<Object>> getAttributeSuppliers(AzureBlobFileAttributes attributes) { Map<String, Supplier<Object>> map = new HashMap<>(); map.put("creationTime", attributes::creationTime); map.put("lastModifiedTime", attributes::lastModifiedTime); map.put("eTag", attributes::eTag); map.put("blobHttpHeaders", attributes::blobHttpHeaders); map.put("blobType", attributes::blobType); map.put("copyId", attributes::copyId); map.put("copyStatus", attributes::copyStatus); map.put("copySource", attributes::copySource); map.put("copyProgress", attributes::copyProgress); map.put("copyCompletionTime", attributes::copyCompletionTime); map.put("copyStatusDescription", attributes::copyStatusDescription); map.put("isServerEncrypted", attributes::isServerEncrypted); map.put("accessTier", attributes::accessTier); 
map.put("isAccessTierInferred", attributes::isAccessTierInferred); map.put("archiveStatus", attributes::archiveStatus); map.put("accessTierChangeTime", attributes::accessTierChangeTime); map.put("metadata", attributes::metadata); map.put("isRegularFile", attributes::isRegularFile); map.put("isDirectory", attributes::isDirectory); map.put("isVirtualDirectory", attributes::isVirtualDirectory); map.put("isSymbolicLink", attributes::isSymbolicLink); map.put("isOther", attributes::isOther); map.put("size", attributes::size); return map; } /** * Returns the creation time. The creation time is the time that the file was created. Returns null if this is a * virtual directory. * * @return The creation time or null if this is a virtual directory */ @Override public FileTime creationTime() { return !this.isVirtualDirectory ? FileTime.from(this.properties.getCreationTime().toInstant()) : null; } /** * Returns the time of last modification. Returns null if this is a virtual directory * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastModifiedTime() { return !this.isVirtualDirectory ? FileTime.from(this.properties.getLastModified().toInstant()) : null; } /** * Returns the eTag of the blob or null if this is a virtual directory * * @return the eTag of the blob or null if this is a virtual directory */ public String eTag() { return !this.isVirtualDirectory ? this.properties.getETag() : null; } /** * Returns the {@link BlobHttpHeaders} of the blob or null if this is a virtual directory. * * @return {@link BlobHttpHeaders} or null if this is a virtual directory */ public BlobHttpHeaders blobHttpHeaders() { if (this.isVirtualDirectory) { return null; } /* We return these all as one value, so it's consistent with the way of setting, especially the setAttribute method that accepts a string argument for the name of the property. 
Returning them individually would mean we have to support setting them individually as well, which is not possible due to service constraints. */ return new BlobHttpHeaders() .setContentType(this.properties.getContentType()) .setContentLanguage(this.properties.getContentLanguage()) .setContentMd5(this.properties.getContentMd5()) .setContentDisposition(this.properties.getContentDisposition()) .setContentEncoding(this.properties.getContentEncoding()) .setCacheControl(this.properties.getCacheControl()); } /** * Returns the type of the blob or null if this is a virtual directory * * @return the type of the blob or null if this is a virtual directory */ public BlobType blobType() { return !this.isVirtualDirectory ? this.properties.getBlobType() : null; } /** * Returns the identifier of the last copy operation. If this blob hasn't been the target of a copy operation or has * been modified since this won't be set. Returns null if this is a virtual directory * * @return the identifier of the last copy operation or null if this is a virtual directory */ public String copyId() { return !this.isVirtualDirectory ? this.properties.getCopyId() : null; } /** * Returns the status of the last copy operation. If this blob hasn't been the target of a copy operation or has * been modified since this won't be set. Returns null if this is a virtual directory * * @return the status of the last copy operation or null if this is a virtual directory */ public CopyStatusType copyStatus() { return !this.isVirtualDirectory ? this.properties.getCopyStatus() : null; } /** * Returns the source blob URL from the last copy operation. If this blob hasn't been the target of a copy operation * or has been modified since this won't be set. Returns null if this is a virtual directory * * @return the source blob URL from the last copy operation or null if this is a virtual directory */ public String copySource() { return !this.isVirtualDirectory ? 
this.properties.getCopySource() : null; } /** * Returns the number of bytes copied and total bytes in the source from the last copy operation (bytes copied/total * bytes). If this blob hasn't been the target of a copy operation or has been modified since this won't be set. * Returns null if this is a virtual directory * * @return the number of bytes copied and total bytes in the source from the last copy operation null if this is a * virtual directory */ public String copyProgress() { return !this.isVirtualDirectory ? this.properties.getCopyProgress() : null; } /** * Returns the completion time of the last copy operation. If this blob hasn't been the target of a copy operation * or has been modified since this won't be set. Returns null if this is a virtual directory. * * @return the completion time of the last copy operation or null if this is a virtual directory */ public OffsetDateTime copyCompletionTime() { return !this.isVirtualDirectory ? this.properties.getCopyCompletionTime() : null; } /** * Returns the description of the last copy failure, this is set when the {@link * {@link CopyStatusType * target of a copy operation or has been modified since this won't be set. Returns null if this is a virtual * directory. * * @return the description of the last copy failure or null if this is a virtual directory */ public String copyStatusDescription() { return !this.isVirtualDirectory ? this.properties.getCopyStatusDescription() : null; } /** * Returns the status of the blob being encrypted on the server or null if this is a virtual directory. * * @return the status of the blob being encrypted on the server or null if this is a virtual directory */ public Boolean isServerEncrypted() { return !this.isVirtualDirectory ? this.properties.isServerEncrypted() : null; } /** * Returns the tier of the blob. This is only set for Page blobs on a premium storage account or for Block blobs on * blob storage or general purpose V2 account. 
Returns null if this is a virtual directory. * * @return the tier of the blob or null if this is a virtual directory */ public AccessTier accessTier() { return !this.isVirtualDirectory ? this.properties.getAccessTier() : null; } /** * Returns the status of the tier being inferred for the blob. This is only set for Page blobs on a premium storage * account or for Block blobs on blob storage or general purpose V2 account. Returns null if this is a virtual * directory. * * @return the status of the tier being inferred for the blob or null if this is a virtual directory */ public Boolean isAccessTierInferred() { return !this.isVirtualDirectory ? this.properties.isAccessTierInferred() : null; } /** * Returns the archive status of the blob. This is only for blobs on a blob storage and general purpose v2 account. * Returns null if this is a virtual directory. * * @return the archive status of the blob or null if this is a virtual directory */ public ArchiveStatus archiveStatus() { return !this.isVirtualDirectory ? this.properties.getArchiveStatus() : null; } /** * Returns the time when the access tier for the blob was last changed or null if this is a virtual directory. * * @return the time when the access tier for the blob was last changed or null if this is a virtual directory */ public OffsetDateTime accessTierChangeTime() { return !this.isVirtualDirectory ? this.properties.getAccessTierChangeTime() : null; } /** * Returns the metadata associated with this blob or null if this is a virtual directory. * * @return the metadata associated with this blob or null if this is a virtual directory */ public Map<String, String> metadata() { return !this.isVirtualDirectory ? Collections.unmodifiableMap(this.properties.getMetadata()) : null; } /** * Returns the time of last modification or null if this is a virtual directory. * <p> * Last access time is not supported by the blob service. 
In this case, it is typical for implementations to return * the {@link * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastAccessTime() { return !this.isVirtualDirectory ? FileTime.from(this.properties.getLastAccessedTime().toInstant()) : null; } /** * Tells whether the file is a regular file with opaque content. * * @return whether the file is a regular file. */ @Override public boolean isRegularFile() { return !this.isVirtualDirectory && !this.properties.getMetadata().getOrDefault(AzureResource.DIR_METADATA_MARKER, "false").equals("true"); } /** * Tells whether the file is a directory. * <p> * Will return true if the directory is a concrete or virtual directory. See * {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a directory */ @Override public boolean isDirectory() { return !this.isRegularFile(); } /** * Tells whether the file is a virtual directory. * <p> * See {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a virtual directory */ public boolean isVirtualDirectory() { return this.isVirtualDirectory; } /** * Tells whether the file is a symbolic link. * * @return false. Symbolic links are not supported. */ @Override public boolean isSymbolicLink() { return false; } /** * Tells whether the file is something other than a regular file, directory, or symbolic link. * * @return false. No other object types are supported. */ @Override public boolean isOther() { return false; } /** * Returns the size of the file (in bytes). * * @return the size of the file */ @Override /** * Returns the url of the resource. * * @return The file key, which is the url. */ @Override public Object fileKey() { return resource.getBlobClient().getBlobUrl(); } }
Curious why a null check is required here?
public Mono<CosmosContainerResponse> createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { return createDatabaseIfNotExists() .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); final UniqueKeyPolicy uniqueKeyPolicy = information.getUniqueKeyPolicy(); if (uniqueKeyPolicy != null) { cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy); } CosmosAsyncDatabase database = cosmosAsyncClient.getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = information.isAutoScale() ? 
ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit()) : ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .map(cosmosContainerResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null); return cosmosContainerResponse; }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable, this.responseDiagnosticsProcessor)); }); }
cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy);
public Mono<CosmosContainerResponse> createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { return createDatabaseIfNotExists() .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); final UniqueKeyPolicy uniqueKeyPolicy = information.getUniqueKeyPolicy(); if (uniqueKeyPolicy != null) { cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy); } CosmosAsyncDatabase database = cosmosAsyncClient.getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = information.isAutoScale() ? 
ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit()) : ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .map(cosmosContainerResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null); return cosmosContainerResponse; }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable, this.responseDiagnosticsProcessor)); }); }
/**
 * Reactive template implementing {@link ReactiveCosmosOperations}: CRUD, query, count and
 * container-management operations against Azure Cosmos DB via the async SDK client.
 * All operations record response diagnostics through the configured
 * {@link ResponseDiagnosticsProcessor} and translate SDK errors via {@code CosmosExceptionUtils}.
 */
class ReactiveCosmosTemplate implements ReactiveCosmosOperations, ApplicationContextAware {

    private static final Logger LOGGER = LoggerFactory.getLogger(ReactiveCosmosTemplate.class);

    private final MappingCosmosConverter mappingCosmosConverter;
    private final String databaseName;
    private final ResponseDiagnosticsProcessor responseDiagnosticsProcessor;
    private final boolean queryMetricsEnabled;
    private final CosmosAsyncClient cosmosAsyncClient;
    // Nullable: auditing is only applied when a handler was supplied at construction.
    private final IsNewAwareAuditingHandler cosmosAuditingHandler;
    // Nullable: database-level throughput is only provisioned when configured.
    private final DatabaseThroughputConfig databaseThroughputConfig;

    private ApplicationContext applicationContext;

    /**
     * Initialization
     *
     * @param client must not be {@literal null}
     * @param databaseName must not be {@literal null}
     * @param cosmosConfig must not be {@literal null}
     * @param mappingCosmosConverter must not be {@literal null}
     * @param cosmosAuditingHandler can be {@literal null}
     */
    public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName,
                                  CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter,
                                  IsNewAwareAuditingHandler cosmosAuditingHandler) {
        this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, cosmosAuditingHandler);
    }

    /**
     * Initialization
     *
     * @param client must not be {@literal null}
     * @param databaseName must not be {@literal null}
     * @param cosmosConfig must not be {@literal null}
     * @param mappingCosmosConverter must not be {@literal null}
     */
    public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName,
                                  CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter) {
        this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, null);
    }

    /**
     * Constructor
     *
     * @param cosmosFactory the cosmos db factory
     * @param cosmosConfig the cosmos config
     * @param mappingCosmosConverter the mappingCosmosConverter
     * @param cosmosAuditingHandler the auditing handler
     */
    public ReactiveCosmosTemplate(CosmosFactory cosmosFactory,
                                  CosmosConfig cosmosConfig,
                                  MappingCosmosConverter mappingCosmosConverter,
                                  IsNewAwareAuditingHandler cosmosAuditingHandler) {
        Assert.notNull(cosmosFactory, "CosmosFactory must not be null!");
        Assert.notNull(cosmosConfig, "CosmosConfig must not be null!");
        Assert.notNull(mappingCosmosConverter, "MappingCosmosConverter must not be null!");
        this.mappingCosmosConverter = mappingCosmosConverter;
        this.cosmosAsyncClient = cosmosFactory.getCosmosAsyncClient();
        this.databaseName = cosmosFactory.getDatabaseName();
        this.responseDiagnosticsProcessor = cosmosConfig.getResponseDiagnosticsProcessor();
        this.queryMetricsEnabled = cosmosConfig.isQueryMetricsEnabled();
        this.cosmosAuditingHandler = cosmosAuditingHandler;
        this.databaseThroughputConfig = cosmosConfig.getDatabaseThroughputConfig();
    }

    /**
     * Initialization
     *
     * @param cosmosFactory must not be {@literal null}
     * @param cosmosConfig must not be {@literal null}
     * @param mappingCosmosConverter must not be {@literal null}
     */
    public ReactiveCosmosTemplate(CosmosFactory cosmosFactory, CosmosConfig cosmosConfig,
                                  MappingCosmosConverter mappingCosmosConverter) {
        this(cosmosFactory, cosmosConfig, mappingCosmosConverter, null);
    }

    /**
     * @param applicationContext the application context
     * @throws BeansException the bean exception
     */
    public void setApplicationContext(@NonNull ApplicationContext applicationContext) throws BeansException {
        this.applicationContext = applicationContext;
    }

    /**
     * Creates the configured database if it does not already exist, applying the
     * database-level throughput settings when a {@link DatabaseThroughputConfig} is present.
     *
     * @return Mono containing the CosmosDatabaseResponse
     */
    // FIX: the original declared this private method with @Override (and a javadoc that
    // described createContainerIfNotExists); a private method cannot override anything,
    // so the annotation has been removed and the javadoc corrected.
    private Mono<CosmosDatabaseResponse> createDatabaseIfNotExists() {
        if (databaseThroughputConfig == null) {
            return cosmosAsyncClient
                .createDatabaseIfNotExists(this.databaseName);
        } else {
            ThroughputProperties throughputProperties = databaseThroughputConfig.isAutoScale()
                ? ThroughputProperties.createAutoscaledThroughput(databaseThroughputConfig.getRequestUnits())
                : ThroughputProperties.createManualThroughput(databaseThroughputConfig.getRequestUnits());
            return cosmosAsyncClient
                .createDatabaseIfNotExists(this.databaseName, throughputProperties);
        }
    }

    /**
     * Reads the properties of the given container.
     *
     * @param containerName the container name
     * @return Mono with the container properties
     */
    @Override
    public Mono<CosmosContainerProperties> getContainerProperties(String containerName) {
        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .read()
            .map(CosmosContainerResponse::getProperties);
    }

    /**
     * Replaces the properties of the given container.
     *
     * @param containerName the container name
     * @param properties the new container properties
     * @return Mono with the replaced container properties
     */
    @Override
    public Mono<CosmosContainerProperties> replaceContainerProperties(String containerName,
                                                                      CosmosContainerProperties properties) {
        return this.cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .replace(properties)
            .map(CosmosContainerResponse::getProperties);
    }

    /**
     *
     * Find all items in a given container
     *
     * @param containerName the containerName
     * @param domainType the domainType
     * @return Flux with all the found items or error
     */
    @Override
    public <T> Flux<T> findAll(String containerName, Class<T> domainType) {
        final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL));
        return find(query, domainType, containerName);
    }

    /**
     * Find all items in a given container
     *
     * @param domainType the domainType
     * @return Flux with all the found items or error
     */
    @Override
    public <T> Flux<T> findAll(Class<T> domainType) {
        return findAll(domainType.getSimpleName(), domainType);
    }

    /**
     * Find all items restricted to a single partition.
     *
     * @param partitionKey the partition key to scope the query to
     * @param domainType the domainType
     * @return Flux with all the found items or error
     */
    @Override
    public <T> Flux<T> findAll(PartitionKey partitionKey, Class<T> domainType) {
        Assert.notNull(partitionKey, "partitionKey should not be null");
        Assert.notNull(domainType, "domainType should not be null");
        final String containerName = getContainerName(domainType);
        final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        cosmosQueryRequestOptions.setPartitionKey(partitionKey);
        cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled);
        return cosmosAsyncClient
            .getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems("SELECT * FROM r", cosmosQueryRequestOptions, JsonNode.class)
            .byPage()
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemFeedResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse);
                return Flux.fromIterable(cosmosItemFeedResponse.getResults());
            })
            .map(cosmosItemProperties ->
                emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItemProperties))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable,
                    this.responseDiagnosticsProcessor));
    }

    /**
     * Find by id
     *
     * @param id the id
     * @param domainType the domainType
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> findById(Object id, Class<T> domainType) {
        Assert.notNull(domainType, "domainType should not be null");
        return findById(getContainerName(domainType), id, domainType);
    }

    /**
     * Find by id
     *
     * @param containerName the container name
     * @param id the id
     * @param domainType the entity class
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> findById(String containerName, Object id, Class<T> domainType) {
        Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces");
        Assert.notNull(domainType, "domainType should not be null");
        // Cross-partition lookup by id: issues a query rather than a point read.
        final String query = "select * from root where root.id = @ROOT_ID";
        final SqlParameter param = new SqlParameter("@ROOT_ID", CosmosUtils.getStringIDValue(id));
        final SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(query, param);
        final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setQueryMetricsEnabled(this.queryMetricsEnabled);
        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems(sqlQuerySpec, options, JsonNode.class)
            .byPage()
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemFeedResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse);
                return Mono.justOrEmpty(cosmosItemFeedResponse
                    .getResults()
                    .stream()
                    .map(cosmosItem -> emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItem))
                    .findFirst());
            })
            .onErrorResume(throwable ->
                CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable,
                    this.responseDiagnosticsProcessor))
            .next();
    }

    /**
     * Find by id
     *
     * @param id the id
     * @param domainType the entity class
     * @param partitionKey partition Key
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> findById(Object id, Class<T> domainType, PartitionKey partitionKey) {
        Assert.notNull(domainType, "domainType should not be null");
        String idToFind = CosmosUtils.getStringIDValue(id);
        final String containerName = getContainerName(domainType);
        // Partition key available: efficient point read instead of a query.
        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .readItem(idToFind, partitionKey, JsonNode.class)
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null);
                return Mono.justOrEmpty(emitOnLoadEventAndConvertToDomainObject(domainType,
                    cosmosItemResponse.getItem()));
            })
            .onErrorResume(throwable ->
                CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable,
                    this.responseDiagnosticsProcessor));
    }

    /**
     * Insert
     *
     * @param <T> type of inserted objectToSave
     * @param objectToSave the object to save
     * @param partitionKey the partition key
     * @return Mono with the item or error
     */
    public <T> Mono<T> insert(T objectToSave, PartitionKey partitionKey) {
        return insert(getContainerName(objectToSave.getClass()), objectToSave, partitionKey);
    }

    /**
     * Insert
     *
     * @param objectToSave the object to save
     * @param <T> type of inserted objectToSave
     * @return Mono with the item or error
     */
    public <T> Mono<T> insert(T objectToSave) {
        return insert(getContainerName(objectToSave.getClass()), objectToSave, null);
    }

    /**
     * Insert
     *
     * @param <T> type of inserted objectToSave
     * @param containerName the container name
     * @param objectToSave the object to save
     * @param partitionKey the partition key
     * @return Mono with the item or error
     */
    @SuppressWarnings("unchecked") // cast of objectToSave.getClass() to Class<T>, matching deleteEntity
    public <T> Mono<T> insert(String containerName, Object objectToSave, PartitionKey partitionKey) {
        Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces");
        Assert.notNull(objectToSave, "objectToSave should not be null");
        final Class<T> domainType = (Class<T>) objectToSave.getClass();
        // Auditing and id generation must run before the entity is serialized.
        markAuditedIfConfigured(objectToSave);
        generateIdIfNullAndAutoGenerationEnabled(objectToSave, domainType);
        final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(objectToSave);
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        return cosmosAsyncClient
            .getDatabase(this.databaseName)
            .getContainer(containerName)
            .createItem(originalItem, partitionKey, options)
            .publishOn(Schedulers.parallel())
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to insert item", throwable,
                    this.responseDiagnosticsProcessor))
            .flatMap(cosmosItemResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null);
                return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem()));
            });
    }

    /**
     * Insert
     *
     * @param <T> type of inserted objectToSave
     * @param containerName the container name
     * @param objectToSave the object to save
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> insert(String containerName, T objectToSave) {
        return insert(containerName, objectToSave, null);
    }

    // Assigns a random UUID to the id field when auto-generation is enabled and the id is unset.
    @SuppressWarnings("unchecked")
    private <T> void generateIdIfNullAndAutoGenerationEnabled(T originalItem, Class<?> type) {
        CosmosEntityInformation<?, ?> entityInfo = CosmosEntityInformation.getInstance(type);
        if (entityInfo.shouldGenerateId()
            && ReflectionUtils.getField(entityInfo.getIdField(), originalItem) == null) {
            ReflectionUtils.setField(entityInfo.getIdField(), originalItem, UUID.randomUUID().toString());
        }
    }

    /**
     * Upsert
     *
     * @param object the object to upsert
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> upsert(T object) {
        return upsert(getContainerName(object.getClass()), object);
    }

    /**
     * Upsert
     *
     * @param containerName the container name
     * @param object the object to save
     * @return Mono with the item or error
     */
    @Override
    @SuppressWarnings("unchecked") // cast of object.getClass() to Class<T>, matching deleteEntity
    public <T> Mono<T> upsert(String containerName, T object) {
        final Class<T> domainType = (Class<T>) object.getClass();
        markAuditedIfConfigured(object);
        final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(object);
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        // Optimistic concurrency: sets the if-match etag when the entity is @Version-ed.
        applyVersioning(object.getClass(), originalItem, options);
        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .upsertItem(originalItem, options)
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null);
                return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem()));
            })
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to upsert item", throwable,
                    this.responseDiagnosticsProcessor));
    }

    /**
     * Deletes the item with id and partition key.
     *
     * @param containerName Container name of database
     * @param id item id
     * @param partitionKey the partition key
     */
    @Override
    public Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey) {
        return deleteById(containerName, id, partitionKey, new CosmosItemRequestOptions());
    }

    // Core delete-by-id implementation; falls back to PartitionKey.NONE when none is given.
    private Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey,
                                  CosmosItemRequestOptions cosmosItemRequestOptions) {
        Assert.hasText(containerName, "container name should not be null, empty or only whitespaces");
        String idToDelete = CosmosUtils.getStringIDValue(id);
        if (partitionKey == null) {
            partitionKey = PartitionKey.NONE;
        }
        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .deleteItem(idToDelete, partitionKey, cosmosItemRequestOptions)
            .publishOn(Schedulers.parallel())
            .doOnNext(cosmosItemResponse ->
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable,
                    this.responseDiagnosticsProcessor))
            .then();
    }

    /**
     * Deletes the entity
     *
     * @param <T> type class of domain type
     * @param containerName Container name of database
     * @param entity the entity to delete
     * @return void Mono
     */
    public <T> Mono<Void> deleteEntity(String containerName, T entity) {
        Assert.notNull(entity, "entity to be deleted should not be null");
        @SuppressWarnings("unchecked")
        final Class<T> domainType = (Class<T>) entity.getClass();
        final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(entity);
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        applyVersioning(entity.getClass(), originalItem, options);
        return deleteItem(originalItem, containerName, domainType).then();
    }

    /**
     * Delete all items in a container
     *
     * @param containerName the container name
     * @param domainType the domainType
     * @return void Mono
     */
    @Override
    public Mono<Void> deleteAll(@NonNull String containerName, @NonNull Class<?> domainType) {
        Assert.hasText(containerName, "container name should not be null, empty or only whitespaces");
        final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL));
        return this.delete(query, domainType, containerName).then();
    }

    /**
     * Delete items matching query
     *
     * @param query the document query
     * @param domainType the entity class
     * @param containerName the container name
     * @return Mono
     */
    @Override
    public <T> Flux<T> delete(CosmosQuery query, Class<T> domainType, String containerName) {
        Assert.notNull(query, "DocumentQuery should not be null.");
        Assert.notNull(domainType, "domainType should not be null.");
        Assert.hasText(containerName, "container name should not be null, empty or only whitespaces");
        // Query first, then delete each matched document individually.
        final Flux<JsonNode> results = findItems(query, containerName, domainType);
        return results.flatMap(d -> deleteItem(d, containerName, domainType));
    }

    /**
     * Find items
     *
     * @param query the document query
     * @param domainType the entity class
     * @param containerName the container name
     * @return Flux with found items or error
     */
    @Override
    public <T> Flux<T> find(CosmosQuery query, Class<T> domainType, String containerName) {
        return findItems(query, containerName, domainType)
            .map(cosmosItemProperties ->
                emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItemProperties));
    }

    /**
     * Exists
     *
     * @param query the document query
     * @param domainType the entity class (unused here; kept for the interface contract)
     * @param containerName the container name
     * @return Mono with a boolean or error
     */
    @Override
    public Mono<Boolean> exists(CosmosQuery query, Class<?> domainType, String containerName) {
        return count(query, containerName).flatMap(count -> Mono.just(count > 0));
    }

    /**
     * Exists
     *
     * @param id the id
     * @param domainType the entity class
     * @param containerName the container name
     * @return Mono with a boolean or error
     */
    public Mono<Boolean> existsById(Object id, Class<?> domainType, String containerName) {
        // NOTE(review): findById completes empty when no item matches, so this Mono then
        // completes without emitting false rather than emitting false — confirm callers
        // expect that, or consider Mono#hasElement.
        return findById(containerName, id, domainType)
            .flatMap(o -> Mono.just(o != null));
    }

    /**
     * Count
     *
     * @param containerName the container name
     * @return Mono with the count or error
     */
    @Override
    public Mono<Long> count(String containerName) {
        final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL));
        return count(query, containerName);
    }

    /**
     * Count
     *
     * @param query the document query
     * @param containerName the container name
     * @return Mono with count or error
     */
    @Override
    public Mono<Long> count(CosmosQuery query, String containerName) {
        final SqlQuerySpec querySpec = new CountQueryGenerator().generateCosmos(query);
        return getCountValue(querySpec, containerName);
    }

    /**
     * Count
     *
     * @param querySpec the document query spec
     * @param containerName the container name
     * @return Mono with count or error
     */
    @Override
    public Mono<Long> count(SqlQuerySpec querySpec, String containerName) {
        return getCountValue(querySpec, containerName);
    }

    @Override
    public MappingCosmosConverter getConverter() {
        return mappingCosmosConverter;
    }

    @Override
    public <T> Flux<T> runQuery(SqlQuerySpec querySpec, Class<?> domainType, Class<T> returnType) {
        return runQuery(querySpec, Sort.unsorted(), domainType, returnType);
    }

    @Override
    public <T> Flux<T> runQuery(SqlQuerySpec querySpec, Sort sort, Class<?> domainType, Class<T> returnType) {
        SqlQuerySpec sortedQuerySpec = NativeQueryGenerator.getInstance().generateSortedQuery(querySpec, sort);
        return runQuery(sortedQuerySpec, domainType)
            .map(cosmosItemProperties ->
                emitOnLoadEventAndConvertToDomainObject(returnType, cosmosItemProperties));
    }

    // Executes a native query spec against the domain type's container.
    // NOTE(review): unlike findItems/findById, this does not propagate queryMetricsEnabled
    // onto the request options — confirm whether that is intentional.
    private Flux<JsonNode> runQuery(SqlQuerySpec querySpec, Class<?> domainType) {
        String containerName = getContainerName(domainType);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems(querySpec, options, JsonNode.class)
            .byPage()
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemFeedResponse -> {
                CosmosUtils
                    .fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                        cosmosItemFeedResponse.getCosmosDiagnostics(),
                        cosmosItemFeedResponse);
                return Flux.fromIterable(cosmosItemFeedResponse.getResults());
            })
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable,
                    this.responseDiagnosticsProcessor));
    }

    // Runs a count query and extracts the scalar from the first result of the first page.
    private Mono<Long> getCountValue(SqlQuerySpec querySpec, String containerName) {
        final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setQueryMetricsEnabled(this.queryMetricsEnabled);
        return executeQuery(querySpec, containerName, options)
            .doOnNext(feedResponse ->
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    feedResponse.getCosmosDiagnostics(), feedResponse))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to get count value", throwable,
                    this.responseDiagnosticsProcessor))
            .next()
            .map(r -> r.getResults().get(0).asLong());
    }

    // Thin paged-query wrapper shared by the count path.
    private Flux<FeedResponse<JsonNode>> executeQuery(SqlQuerySpec sqlQuerySpec, String containerName,
                                                      CosmosQueryRequestOptions options) {
        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems(sqlQuerySpec, options, JsonNode.class)
            .byPage()
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to execute query", throwable,
                    this.responseDiagnosticsProcessor));
    }

    /**
     * Delete container with container name
     *
     * @param containerName the container name
     */
    @Override
    public void deleteContainer(@NonNull String containerName) {
        Assert.hasText(containerName, "containerName should have text.");
        // Deliberately blocking: container deletion is an administrative, synchronous operation.
        cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .delete()
            .doOnNext(cosmosContainerResponse ->
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosContainerResponse.getDiagnostics(), null))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to delete container", throwable,
                    this.responseDiagnosticsProcessor))
            .block();
    }

    /**
     * @param domainType the domain class
     * @return the container name
     */
    public String getContainerName(Class<?> domainType) {
        Assert.notNull(domainType, "domainType should not be null");
        return CosmosEntityInformation.getInstance(domainType).getContainerName();
    }

    // Applies auditing metadata when an auditing handler was configured; no-op otherwise.
    private void markAuditedIfConfigured(Object object) {
        if (cosmosAuditingHandler != null) {
            cosmosAuditingHandler.markAudited(object);
        }
    }

    // Translates a CosmosQuery into SQL and streams matching documents as raw JSON nodes,
    // scoping to a single partition when the query pins a partition key value.
    private <T> Flux<JsonNode> findItems(@NonNull CosmosQuery query,
                                         @NonNull String containerName,
                                         @NonNull Class<T> domainType) {
        final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(query);
        final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled);
        Optional<Object> partitionKeyValue = query.getPartitionKeyValue(domainType);
        partitionKeyValue.ifPresent(o -> {
            LOGGER.debug("Setting partition key {}", o);
            cosmosQueryRequestOptions.setPartitionKey(new PartitionKey(o));
        });
        return cosmosAsyncClient
            .getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems(sqlQuerySpec, cosmosQueryRequestOptions, JsonNode.class)
            .byPage()
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemFeedResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemFeedResponse.getCosmosDiagnostics(),
                    cosmosItemFeedResponse);
                return Flux.fromIterable(cosmosItemFeedResponse.getResults());
            })
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to query items", throwable,
                    this.responseDiagnosticsProcessor));
    }

    // Deletes a single raw document, honoring optimistic concurrency via applyVersioning,
    // and emits the deleted document converted back to the domain type.
    private <T> Mono<T> deleteItem(@NonNull JsonNode jsonNode, String containerName,
                                   @NonNull Class<T> domainType) {
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        applyVersioning(domainType, jsonNode, options);
        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .deleteItem(jsonNode, options)
            .publishOn(Schedulers.parallel())
            .map(cosmosItemResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null);
                return cosmosItemResponse;
            })
            .flatMap(objectCosmosItemResponse -> Mono.just(toDomainObject(domainType, jsonNode)))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable,
                    this.responseDiagnosticsProcessor));
    }

    // Publishes an AfterLoadEvent for the raw document, then converts it to the domain type.
    private <T> T emitOnLoadEventAndConvertToDomainObject(@NonNull Class<T> domainType, JsonNode responseJsonNode) {
        CosmosEntityInformation<?, ?> entityInformation = CosmosEntityInformation.getInstance(domainType);
        maybeEmitEvent(new AfterLoadEvent<>(responseJsonNode, domainType, entityInformation.getContainerName()));
        return toDomainObject(domainType, responseJsonNode);
    }

    private <T> T toDomainObject(@NonNull Class<T> domainType, JsonNode jsonNode) {
        return mappingCosmosConverter.read(domainType, jsonNode);
    }

    // Sets the if-match etag on the request options when the domain type is @Version-ed.
    private void applyVersioning(Class<?> domainType, JsonNode jsonNode, CosmosItemRequestOptions options) {
        CosmosEntityInformation<?, ?> entityInformation = CosmosEntityInformation.getInstance(domainType);
        if (entityInformation.isVersioned()) {
            options.setIfMatchETag(jsonNode.get(Constants.ETAG_PROPERTY_DEFAULT_NAME).asText());
        }
    }

    // Events are only published when an ApplicationContext has been injected.
    private void maybeEmitEvent(CosmosMappingEvent<?> event) {
        if (canPublishEvent()) {
            this.applicationContext.publishEvent(event);
        }
    }

    private boolean canPublishEvent() {
        return this.applicationContext != null;
    }
}
class ReactiveCosmosTemplate implements ReactiveCosmosOperations, ApplicationContextAware { private static final Logger LOGGER = LoggerFactory.getLogger(ReactiveCosmosTemplate.class); private final MappingCosmosConverter mappingCosmosConverter; private final String databaseName; private final ResponseDiagnosticsProcessor responseDiagnosticsProcessor; private final boolean queryMetricsEnabled; private final CosmosAsyncClient cosmosAsyncClient; private final IsNewAwareAuditingHandler cosmosAuditingHandler; private final DatabaseThroughputConfig databaseThroughputConfig; private ApplicationContext applicationContext; /** * Initialization * * @param client must not be {@literal null} * @param databaseName must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} * @param cosmosAuditingHandler can be {@literal null} */ public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter, IsNewAwareAuditingHandler cosmosAuditingHandler) { this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, cosmosAuditingHandler); } /** * Initialization * * @param client must not be {@literal null} * @param databaseName must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} */ public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter) { this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, null); } /** * Constructor * * @param cosmosFactory the cosmos db factory * @param cosmosConfig the cosmos config * @param mappingCosmosConverter the mappingCosmosConverter * @param cosmosAuditingHandler the auditing handler */ public ReactiveCosmosTemplate(CosmosFactory cosmosFactory, CosmosConfig cosmosConfig, 
MappingCosmosConverter mappingCosmosConverter, IsNewAwareAuditingHandler cosmosAuditingHandler) { Assert.notNull(cosmosFactory, "CosmosFactory must not be null!"); Assert.notNull(cosmosConfig, "CosmosConfig must not be null!"); Assert.notNull(mappingCosmosConverter, "MappingCosmosConverter must not be null!"); this.mappingCosmosConverter = mappingCosmosConverter; this.cosmosAsyncClient = cosmosFactory.getCosmosAsyncClient(); this.databaseName = cosmosFactory.getDatabaseName(); this.responseDiagnosticsProcessor = cosmosConfig.getResponseDiagnosticsProcessor(); this.queryMetricsEnabled = cosmosConfig.isQueryMetricsEnabled(); this.cosmosAuditingHandler = cosmosAuditingHandler; this.databaseThroughputConfig = cosmosConfig.getDatabaseThroughputConfig(); } /** * Initialization * * @param cosmosFactory must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} */ public ReactiveCosmosTemplate(CosmosFactory cosmosFactory, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter) { this(cosmosFactory, cosmosConfig, mappingCosmosConverter, null); } /** * @param applicationContext the application context * @throws BeansException the bean exception */ public void setApplicationContext(@NonNull ApplicationContext applicationContext) throws BeansException { this.applicationContext = applicationContext; } /** * Creates a container if it doesn't already exist * * @param information the CosmosEntityInformation * @return Mono containing CosmosContainerResponse */ @Override private Mono<CosmosDatabaseResponse> createDatabaseIfNotExists() { if (databaseThroughputConfig == null) { return cosmosAsyncClient .createDatabaseIfNotExists(this.databaseName); } else { ThroughputProperties throughputProperties = databaseThroughputConfig.isAutoScale() ? 
ThroughputProperties.createAutoscaledThroughput(databaseThroughputConfig.getRequestUnits()) : ThroughputProperties.createManualThroughput(databaseThroughputConfig.getRequestUnits()); return cosmosAsyncClient .createDatabaseIfNotExists(this.databaseName, throughputProperties); } } @Override public Mono<CosmosContainerProperties> getContainerProperties(String containerName) { return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .read() .map(CosmosContainerResponse::getProperties); } @Override public Mono<CosmosContainerProperties> replaceContainerProperties(String containerName, CosmosContainerProperties properties) { return this.cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .replace(properties) .map(CosmosContainerResponse::getProperties); } /** * * Find all items in a given container * * @param containerName the containerName * @param domainType the domainType * @return Flux with all the found items or error */ @Override public <T> Flux<T> findAll(String containerName, Class<T> domainType) { final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return find(query, domainType, containerName); } /** * Find all items in a given container * * @param domainType the domainType * @return Flux with all the found items or error */ @Override public <T> Flux<T> findAll(Class<T> domainType) { return findAll(domainType.getSimpleName(), domainType); } @Override public <T> Flux<T> findAll(PartitionKey partitionKey, Class<T> domainType) { Assert.notNull(partitionKey, "partitionKey should not be null"); Assert.notNull(domainType, "domainType should not be null"); final String containerName = getContainerName(domainType); final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); cosmosQueryRequestOptions.setPartitionKey(partitionKey); cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled); return cosmosAsyncClient 
.getDatabase(this.databaseName) .getContainer(containerName) .queryItems("SELECT * FROM r", cosmosQueryRequestOptions, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Flux.fromIterable(cosmosItemFeedResponse.getResults()); }) .map(cosmosItemProperties -> emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItemProperties)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable, this.responseDiagnosticsProcessor)); } /** * Find by id * * @param id the id * @param domainType the domainType * @return Mono with the item or error */ @Override public <T> Mono<T> findById(Object id, Class<T> domainType) { Assert.notNull(domainType, "domainType should not be null"); return findById(getContainerName(domainType), id, domainType); } /** * Find by id * * @param containerName the container name * @param id the id * @param domainType the entity class * @return Mono with the item or error */ @Override public <T> Mono<T> findById(String containerName, Object id, Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final String query = "select * from root where root.id = @ROOT_ID"; final SqlParameter param = new SqlParameter("@ROOT_ID", CosmosUtils.getStringIDValue(id)); final SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(query, param); final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(this.queryMetricsEnabled); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .queryItems(sqlQuerySpec, options, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { 
CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Mono.justOrEmpty(cosmosItemFeedResponse .getResults() .stream() .map(cosmosItem -> emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItem)) .findFirst()); }) .onErrorResume(throwable -> CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable, this.responseDiagnosticsProcessor)) .next(); } /** * Find by id * * @param id the id * @param domainType the entity class * @param partitionKey partition Key * @return Mono with the item or error */ @Override public <T> Mono<T> findById(Object id, Class<T> domainType, PartitionKey partitionKey) { Assert.notNull(domainType, "domainType should not be null"); String idToFind = CosmosUtils.getStringIDValue(id); final String containerName = getContainerName(domainType); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .readItem(idToFind, partitionKey, JsonNode.class) .publishOn(Schedulers.parallel()) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.justOrEmpty(emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItemResponse.getItem())); }) .onErrorResume(throwable -> CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable, this.responseDiagnosticsProcessor)); } /** * Insert * * @param <T> type of inserted objectToSave * @param objectToSave the object to save * @param partitionKey the partition key * @return Mono with the item or error */ public <T> Mono<T> insert(T objectToSave, PartitionKey partitionKey) { return insert(getContainerName(objectToSave.getClass()), objectToSave, partitionKey); } /** * Insert * * @param objectToSave the object to save * @param <T> type of inserted objectToSave * @return Mono with the item or error */ public <T> Mono<T> insert(T 
objectToSave) { return insert(getContainerName(objectToSave.getClass()), objectToSave, null); } /** * Insert * * @param <T> type of inserted objectToSave * @param containerName the container name * @param objectToSave the object to save * @param partitionKey the partition key * @return Mono with the item or error */ public <T> Mono<T> insert(String containerName, Object objectToSave, PartitionKey partitionKey) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(objectToSave, "objectToSave should not be null"); final Class<T> domainType = (Class<T>) objectToSave.getClass(); markAuditedIfConfigured(objectToSave); generateIdIfNullAndAutoGenerationEnabled(objectToSave, domainType); final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(objectToSave); final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); return cosmosAsyncClient .getDatabase(this.databaseName) .getContainer(containerName) .createItem(originalItem, partitionKey, options) .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to insert item", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem())); }); } /** * Insert * * @param <T> type of inserted objectToSave * @param containerName the container name * @param objectToSave the object to save * @return Mono with the item or error */ @Override public <T> Mono<T> insert(String containerName, T objectToSave) { return insert(containerName, objectToSave, null); } @SuppressWarnings("unchecked") private <T> void generateIdIfNullAndAutoGenerationEnabled(T originalItem, Class<?> type) { CosmosEntityInformation<?, ?> entityInfo = CosmosEntityInformation.getInstance(type); if (entityInfo.shouldGenerateId() 
&& ReflectionUtils.getField(entityInfo.getIdField(), originalItem) == null) { ReflectionUtils.setField(entityInfo.getIdField(), originalItem, UUID.randomUUID().toString()); } } /** * Upsert * * @param object the object to upsert * @return Mono with the item or error */ @Override public <T> Mono<T> upsert(T object) { return upsert(getContainerName(object.getClass()), object); } /** * Upsert * * @param containerName the container name * @param object the object to save * @return Mono with the item or error */ @Override public <T> Mono<T> upsert(String containerName, T object) { final Class<T> domainType = (Class<T>) object.getClass(); markAuditedIfConfigured(object); final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(object); final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(object.getClass(), originalItem, options); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .upsertItem(originalItem, options) .publishOn(Schedulers.parallel()) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem())); }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to upsert item", throwable, this.responseDiagnosticsProcessor)); } /** * Deletes the item with id and partition key. 
* * @param containerName Container name of database * @param id item id * @param partitionKey the partition key */ @Override public Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey) { return deleteById(containerName, id, partitionKey, new CosmosItemRequestOptions()); } private Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey, CosmosItemRequestOptions cosmosItemRequestOptions) { Assert.hasText(containerName, "container name should not be null, empty or only whitespaces"); String idToDelete = CosmosUtils.getStringIDValue(id); if (partitionKey == null) { partitionKey = PartitionKey.NONE; } return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .deleteItem(idToDelete, partitionKey, cosmosItemRequestOptions) .publishOn(Schedulers.parallel()) .doOnNext(cosmosItemResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable, this.responseDiagnosticsProcessor)) .then(); } /** * Deletes the entity * * @param <T> type class of domain type * @param containerName Container name of database * @param entity the entity to delete * @return void Mono */ public <T> Mono<Void> deleteEntity(String containerName, T entity) { Assert.notNull(entity, "entity to be deleted should not be null"); @SuppressWarnings("unchecked") final Class<T> domainType = (Class<T>) entity.getClass(); final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(entity); final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(entity.getClass(), originalItem, options); return deleteItem(originalItem, containerName, domainType).then(); } /** * Delete all items in a container * * @param containerName the container name * @param domainType the domainType * @return void Mono */ @Override public Mono<Void> 
deleteAll(@NonNull String containerName, @NonNull Class<?> domainType) { Assert.hasText(containerName, "container name should not be null, empty or only whitespaces"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return this.delete(query, domainType, containerName).then(); } /** * Delete items matching query * * @param query the document query * @param domainType the entity class * @param containerName the container name * @return Mono */ @Override public <T> Flux<T> delete(CosmosQuery query, Class<T> domainType, String containerName) { Assert.notNull(query, "DocumentQuery should not be null."); Assert.notNull(domainType, "domainType should not be null."); Assert.hasText(containerName, "container name should not be null, empty or only whitespaces"); final Flux<JsonNode> results = findItems(query, containerName, domainType); return results.flatMap(d -> deleteItem(d, containerName, domainType)); } /** * Find items * * @param query the document query * @param domainType the entity class * @param containerName the container name * @return Flux with found items or error */ @Override public <T> Flux<T> find(CosmosQuery query, Class<T> domainType, String containerName) { return findItems(query, containerName, domainType) .map(cosmosItemProperties -> emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItemProperties)); } /** * Exists * * @param query the document query * @param domainType the entity class * @param containerName the container name * @return Mono with a boolean or error */ @Override public Mono<Boolean> exists(CosmosQuery query, Class<?> domainType, String containerName) { return count(query, containerName).flatMap(count -> Mono.just(count > 0)); } /** * Exists * * @param id the id * @param domainType the entity class * @param containerName the container name * @return Mono with a boolean or error */ public Mono<Boolean> existsById(Object id, Class<?> domainType, String containerName) { return 
findById(containerName, id, domainType) .flatMap(o -> Mono.just(o != null)); } /** * Count * * @param containerName the container name * @return Mono with the count or error */ @Override public Mono<Long> count(String containerName) { final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return count(query, containerName); } /** * Count * * @param query the document query * @param containerName the container name * @return Mono with count or error */ @Override public Mono<Long> count(CosmosQuery query, String containerName) { final SqlQuerySpec querySpec = new CountQueryGenerator().generateCosmos(query); return getCountValue(querySpec, containerName); } /** * Count * * @param querySpec the document query spec * @param containerName the container name * @return Mono with count or error */ @Override public Mono<Long> count(SqlQuerySpec querySpec, String containerName) { return getCountValue(querySpec, containerName); } @Override public MappingCosmosConverter getConverter() { return mappingCosmosConverter; } @Override public <T> Flux<T> runQuery(SqlQuerySpec querySpec, Class<?> domainType, Class<T> returnType) { return runQuery(querySpec, Sort.unsorted(), domainType, returnType); } @Override public <T> Flux<T> runQuery(SqlQuerySpec querySpec, Sort sort, Class<?> domainType, Class<T> returnType) { SqlQuerySpec sortedQuerySpec = NativeQueryGenerator.getInstance().generateSortedQuery(querySpec, sort); return runQuery(sortedQuerySpec, domainType) .map(cosmosItemProperties -> emitOnLoadEventAndConvertToDomainObject(returnType, cosmosItemProperties)); } private Flux<JsonNode> runQuery(SqlQuerySpec querySpec, Class<?> domainType) { String containerName = getContainerName(domainType); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .queryItems(querySpec, options, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) 
.flatMap(cosmosItemFeedResponse -> { CosmosUtils .fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Flux.fromIterable(cosmosItemFeedResponse.getResults()); }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable, this.responseDiagnosticsProcessor)); } private Mono<Long> getCountValue(SqlQuerySpec querySpec, String containerName) { final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(this.queryMetricsEnabled); return executeQuery(querySpec, containerName, options) .doOnNext(feedResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, feedResponse.getCosmosDiagnostics(), feedResponse)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to get count value", throwable, this.responseDiagnosticsProcessor)) .next() .map(r -> r.getResults().get(0).asLong()); } private Flux<FeedResponse<JsonNode>> executeQuery(SqlQuerySpec sqlQuerySpec, String containerName, CosmosQueryRequestOptions options) { return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .queryItems(sqlQuerySpec, options, JsonNode.class) .byPage() .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to execute query", throwable, this.responseDiagnosticsProcessor)); } /** * Delete container with container name * * @param containerName the container name */ @Override public void deleteContainer(@NonNull String containerName) { Assert.hasText(containerName, "containerName should have text."); cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .delete() .doOnNext(cosmosContainerResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)) .onErrorResume(throwable -> 
CosmosExceptionUtils.exceptionHandler("Failed to delete container", throwable, this.responseDiagnosticsProcessor)) .block(); } /** * @param domainType the domain class * @return the container name */ public String getContainerName(Class<?> domainType) { Assert.notNull(domainType, "domainType should not be null"); return CosmosEntityInformation.getInstance(domainType).getContainerName(); } private void markAuditedIfConfigured(Object object) { if (cosmosAuditingHandler != null) { cosmosAuditingHandler.markAudited(object); } } private <T> Flux<JsonNode> findItems(@NonNull CosmosQuery query, @NonNull String containerName, @NonNull Class<T> domainType) { final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(query); final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled); Optional<Object> partitionKeyValue = query.getPartitionKeyValue(domainType); partitionKeyValue.ifPresent(o -> { LOGGER.debug("Setting partition key {}", o); cosmosQueryRequestOptions.setPartitionKey(new PartitionKey(o)); }); return cosmosAsyncClient .getDatabase(this.databaseName) .getContainer(containerName) .queryItems(sqlQuerySpec, cosmosQueryRequestOptions, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Flux.fromIterable(cosmosItemFeedResponse.getResults()); }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to query items", throwable, this.responseDiagnosticsProcessor)); } private <T> Mono<T> deleteItem(@NonNull JsonNode jsonNode, String containerName, @NonNull Class<T> domainType) { final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(domainType, jsonNode, options); return 
cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .deleteItem(jsonNode, options) .publishOn(Schedulers.parallel()) .map(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return cosmosItemResponse; }) .flatMap(objectCosmosItemResponse -> Mono.just(toDomainObject(domainType, jsonNode))) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable, this.responseDiagnosticsProcessor)); } private <T> T emitOnLoadEventAndConvertToDomainObject(@NonNull Class<T> domainType, JsonNode responseJsonNode) { CosmosEntityInformation<?, ?> entityInformation = CosmosEntityInformation.getInstance(domainType); maybeEmitEvent(new AfterLoadEvent<>(responseJsonNode, domainType, entityInformation.getContainerName())); return toDomainObject(domainType, responseJsonNode); } private <T> T toDomainObject(@NonNull Class<T> domainType, JsonNode jsonNode) { return mappingCosmosConverter.read(domainType, jsonNode); } private void applyVersioning(Class<?> domainType, JsonNode jsonNode, CosmosItemRequestOptions options) { CosmosEntityInformation<?, ?> entityInformation = CosmosEntityInformation.getInstance(domainType); if (entityInformation.isVersioned()) { options.setIfMatchETag(jsonNode.get(Constants.ETAG_PROPERTY_DEFAULT_NAME).asText()); } } private void maybeEmitEvent(CosmosMappingEvent<?> event) { if (canPublishEvent()) { this.applicationContext.publishEvent(event); } } private boolean canPublishEvent() { return this.applicationContext != null; } }
Just a safety check, to make sure we don't override customer's unique key policy with our empty unique key policy.
public Mono<CosmosContainerResponse> createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { return createDatabaseIfNotExists() .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); final UniqueKeyPolicy uniqueKeyPolicy = information.getUniqueKeyPolicy(); if (uniqueKeyPolicy != null) { cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy); } CosmosAsyncDatabase database = cosmosAsyncClient.getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = information.isAutoScale() ? 
ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit()) : ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .map(cosmosContainerResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null); return cosmosContainerResponse; }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable, this.responseDiagnosticsProcessor)); }); }
cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy);
public Mono<CosmosContainerResponse> createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { return createDatabaseIfNotExists() .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); final UniqueKeyPolicy uniqueKeyPolicy = information.getUniqueKeyPolicy(); if (uniqueKeyPolicy != null) { cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy); } CosmosAsyncDatabase database = cosmosAsyncClient.getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = information.isAutoScale() ? 
ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit()) : ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .map(cosmosContainerResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null); return cosmosContainerResponse; }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable, this.responseDiagnosticsProcessor)); }); }
/**
 * Template providing reactive CRUD, query, count and container-management operations
 * against Azure Cosmos DB, converting between domain objects and {@link JsonNode}
 * via the configured {@link MappingCosmosConverter}.
 *
 * <p>All public operations translate SDK failures through {@code CosmosExceptionUtils}
 * and feed response diagnostics to the configured {@link ResponseDiagnosticsProcessor}.</p>
 */
class ReactiveCosmosTemplate implements ReactiveCosmosOperations, ApplicationContextAware {

    private static final Logger LOGGER = LoggerFactory.getLogger(ReactiveCosmosTemplate.class);

    private final MappingCosmosConverter mappingCosmosConverter;
    private final String databaseName;
    private final ResponseDiagnosticsProcessor responseDiagnosticsProcessor;
    private final boolean queryMetricsEnabled;
    private final CosmosAsyncClient cosmosAsyncClient;
    private final IsNewAwareAuditingHandler cosmosAuditingHandler;
    private final DatabaseThroughputConfig databaseThroughputConfig;

    private ApplicationContext applicationContext;

    /**
     * Initialization
     *
     * @param client must not be {@literal null}
     * @param databaseName must not be {@literal null}
     * @param cosmosConfig must not be {@literal null}
     * @param mappingCosmosConverter must not be {@literal null}
     * @param cosmosAuditingHandler can be {@literal null}
     */
    public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName,
                                  CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter,
                                  IsNewAwareAuditingHandler cosmosAuditingHandler) {
        this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, cosmosAuditingHandler);
    }

    /**
     * Initialization
     *
     * @param client must not be {@literal null}
     * @param databaseName must not be {@literal null}
     * @param cosmosConfig must not be {@literal null}
     * @param mappingCosmosConverter must not be {@literal null}
     */
    public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName,
                                  CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter) {
        this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, null);
    }

    /**
     * Constructor
     *
     * @param cosmosFactory the cosmos db factory
     * @param cosmosConfig the cosmos config
     * @param mappingCosmosConverter the mappingCosmosConverter
     * @param cosmosAuditingHandler the auditing handler, may be {@literal null}
     */
    public ReactiveCosmosTemplate(CosmosFactory cosmosFactory,
                                  CosmosConfig cosmosConfig,
                                  MappingCosmosConverter mappingCosmosConverter,
                                  IsNewAwareAuditingHandler cosmosAuditingHandler) {
        Assert.notNull(cosmosFactory, "CosmosFactory must not be null!");
        Assert.notNull(cosmosConfig, "CosmosConfig must not be null!");
        Assert.notNull(mappingCosmosConverter, "MappingCosmosConverter must not be null!");

        this.mappingCosmosConverter = mappingCosmosConverter;
        this.cosmosAsyncClient = cosmosFactory.getCosmosAsyncClient();
        this.databaseName = cosmosFactory.getDatabaseName();
        this.responseDiagnosticsProcessor = cosmosConfig.getResponseDiagnosticsProcessor();
        this.queryMetricsEnabled = cosmosConfig.isQueryMetricsEnabled();
        this.cosmosAuditingHandler = cosmosAuditingHandler;
        this.databaseThroughputConfig = cosmosConfig.getDatabaseThroughputConfig();
    }

    /**
     * Initialization
     *
     * @param cosmosFactory must not be {@literal null}
     * @param cosmosConfig must not be {@literal null}
     * @param mappingCosmosConverter must not be {@literal null}
     */
    public ReactiveCosmosTemplate(CosmosFactory cosmosFactory, CosmosConfig cosmosConfig,
                                  MappingCosmosConverter mappingCosmosConverter) {
        this(cosmosFactory, cosmosConfig, mappingCosmosConverter, null);
    }

    /**
     * @param applicationContext the application context
     * @throws BeansException the bean exception
     */
    @Override
    public void setApplicationContext(@NonNull ApplicationContext applicationContext) throws BeansException {
        this.applicationContext = applicationContext;
    }

    /**
     * Creates the configured database if it does not already exist, applying the
     * database-level throughput configuration when one is present.
     *
     * <p>FIX(review): this private method previously carried {@code @Override} and a
     * javadoc describing container creation — {@code @Override} on a private method
     * overriding nothing is a compile error, and the doc belonged to
     * {@code createContainerIfNotExists}. Both corrected here.</p>
     *
     * @return Mono emitting the {@link CosmosDatabaseResponse}
     */
    private Mono<CosmosDatabaseResponse> createDatabaseIfNotExists() {
        if (databaseThroughputConfig == null) {
            return cosmosAsyncClient
                .createDatabaseIfNotExists(this.databaseName);
        } else {
            ThroughputProperties throughputProperties = databaseThroughputConfig.isAutoScale()
                ? ThroughputProperties.createAutoscaledThroughput(databaseThroughputConfig.getRequestUnits())
                : ThroughputProperties.createManualThroughput(databaseThroughputConfig.getRequestUnits());
            return cosmosAsyncClient
                .createDatabaseIfNotExists(this.databaseName, throughputProperties);
        }
    }

    /**
     * Reads the current properties of a container.
     *
     * @param containerName the container name
     * @return Mono with the container's {@link CosmosContainerProperties}
     */
    @Override
    public Mono<CosmosContainerProperties> getContainerProperties(String containerName) {
        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .read()
            .map(CosmosContainerResponse::getProperties);
    }

    /**
     * Replaces a container's properties.
     *
     * @param containerName the container name
     * @param properties the replacement properties
     * @return Mono with the updated {@link CosmosContainerProperties}
     */
    @Override
    public Mono<CosmosContainerProperties> replaceContainerProperties(String containerName,
                                                                      CosmosContainerProperties properties) {
        return this.cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .replace(properties)
            .map(CosmosContainerResponse::getProperties);
    }

    /**
     * Find all items in a given container
     *
     * @param containerName the containerName
     * @param domainType the domainType
     * @return Flux with all the found items or error
     */
    @Override
    public <T> Flux<T> findAll(String containerName, Class<T> domainType) {
        final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL));
        return find(query, domainType, containerName);
    }

    /**
     * Find all items in a given container
     *
     * @param domainType the domainType
     * @return Flux with all the found items or error
     */
    @Override
    public <T> Flux<T> findAll(Class<T> domainType) {
        // Container name defaults to the simple class name here, not the
        // CosmosEntityInformation container name — preserved as-is.
        return findAll(domainType.getSimpleName(), domainType);
    }

    /**
     * Find all items within a single logical partition.
     *
     * @param partitionKey the partition to scan, must not be {@literal null}
     * @param domainType the domainType, must not be {@literal null}
     * @return Flux with all the found items or error
     */
    @Override
    public <T> Flux<T> findAll(PartitionKey partitionKey, Class<T> domainType) {
        Assert.notNull(partitionKey, "partitionKey should not be null");
        Assert.notNull(domainType, "domainType should not be null");

        final String containerName = getContainerName(domainType);

        final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        cosmosQueryRequestOptions.setPartitionKey(partitionKey);
        cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled);

        return cosmosAsyncClient
            .getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems("SELECT * FROM r", cosmosQueryRequestOptions, JsonNode.class)
            .byPage()
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemFeedResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse);
                return Flux.fromIterable(cosmosItemFeedResponse.getResults());
            })
            .map(cosmosItemProperties ->
                emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItemProperties))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable,
                    this.responseDiagnosticsProcessor));
    }

    /**
     * Find by id
     *
     * @param id the id
     * @param domainType the domainType
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> findById(Object id, Class<T> domainType) {
        Assert.notNull(domainType, "domainType should not be null");
        return findById(getContainerName(domainType), id, domainType);
    }

    /**
     * Find by id
     *
     * @param containerName the container name
     * @param id the id
     * @param domainType the entity class
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> findById(String containerName, Object id, Class<T> domainType) {
        Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces");
        Assert.notNull(domainType, "domainType should not be null");

        // Cross-partition query by id; parameterized to avoid injection via the id value.
        final String query = "select * from root where root.id = @ROOT_ID";
        final SqlParameter param = new SqlParameter("@ROOT_ID", CosmosUtils.getStringIDValue(id));
        final SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(query, param);
        final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setQueryMetricsEnabled(this.queryMetricsEnabled);

        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems(sqlQuerySpec, options, JsonNode.class)
            .byPage()
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemFeedResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse);
                return Mono.justOrEmpty(cosmosItemFeedResponse
                    .getResults()
                    .stream()
                    .map(cosmosItem -> emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItem))
                    .findFirst());
            })
            .onErrorResume(throwable ->
                CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable,
                    this.responseDiagnosticsProcessor))
            .next();
    }

    /**
     * Find by id
     *
     * @param id the id
     * @param domainType the entity class
     * @param partitionKey partition Key
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> findById(Object id, Class<T> domainType, PartitionKey partitionKey) {
        Assert.notNull(domainType, "domainType should not be null");
        String idToFind = CosmosUtils.getStringIDValue(id);

        final String containerName = getContainerName(domainType);
        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .readItem(idToFind, partitionKey, JsonNode.class)
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null);
                return Mono.justOrEmpty(emitOnLoadEventAndConvertToDomainObject(domainType,
                    cosmosItemResponse.getItem()));
            })
            .onErrorResume(throwable ->
                CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable,
                    this.responseDiagnosticsProcessor));
    }

    /**
     * Insert
     *
     * @param <T> type of inserted objectToSave
     * @param objectToSave the object to save
     * @param partitionKey the partition key
     * @return Mono with the item or error
     */
    public <T> Mono<T> insert(T objectToSave, PartitionKey partitionKey) {
        return insert(getContainerName(objectToSave.getClass()), objectToSave, partitionKey);
    }

    /**
     * Insert
     *
     * @param objectToSave the object to save
     * @param <T> type of inserted objectToSave
     * @return Mono with the item or error
     */
    public <T> Mono<T> insert(T objectToSave) {
        return insert(getContainerName(objectToSave.getClass()), objectToSave, null);
    }

    /**
     * Insert
     *
     * @param <T> type of inserted objectToSave
     * @param containerName the container name
     * @param objectToSave the object to save
     * @param partitionKey the partition key, may be {@literal null}
     * @return Mono with the item or error
     */
    public <T> Mono<T> insert(String containerName, Object objectToSave, PartitionKey partitionKey) {
        Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces");
        Assert.notNull(objectToSave, "objectToSave should not be null");

        // Safe: the runtime class of objectToSave is, by construction, T's class.
        @SuppressWarnings("unchecked")
        final Class<T> domainType = (Class<T>) objectToSave.getClass();

        // Auditing and id generation must run BEFORE serialization to JSON.
        markAuditedIfConfigured(objectToSave);
        generateIdIfNullAndAutoGenerationEnabled(objectToSave, domainType);
        final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(objectToSave);
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();

        return cosmosAsyncClient
            .getDatabase(this.databaseName)
            .getContainer(containerName)
            .createItem(originalItem, partitionKey, options)
            .publishOn(Schedulers.parallel())
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to insert item", throwable,
                    this.responseDiagnosticsProcessor))
            .flatMap(cosmosItemResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null);
                return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem()));
            });
    }

    /**
     * Insert
     *
     * @param <T> type of inserted objectToSave
     * @param containerName the container name
     * @param objectToSave the object to save
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> insert(String containerName, T objectToSave) {
        return insert(containerName, objectToSave, null);
    }

    /**
     * Assigns a random UUID as id when the entity opts into id auto-generation
     * and its id field is currently {@literal null}.
     */
    @SuppressWarnings("unchecked")
    private <T> void generateIdIfNullAndAutoGenerationEnabled(T originalItem, Class<?> type) {
        CosmosEntityInformation<?, ?> entityInfo = CosmosEntityInformation.getInstance(type);
        if (entityInfo.shouldGenerateId()
            && ReflectionUtils.getField(entityInfo.getIdField(), originalItem) == null) {
            ReflectionUtils.setField(entityInfo.getIdField(), originalItem, UUID.randomUUID().toString());
        }
    }

    /**
     * Upsert
     *
     * @param object the object to upsert
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> upsert(T object) {
        return upsert(getContainerName(object.getClass()), object);
    }

    /**
     * Upsert
     *
     * @param containerName the container name
     * @param object the object to save
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> upsert(String containerName, T object) {
        // Safe: the runtime class of object is, by construction, T's class.
        @SuppressWarnings("unchecked")
        final Class<T> domainType = (Class<T>) object.getClass();

        markAuditedIfConfigured(object);
        final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(object);
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        // Optimistic concurrency: set If-Match ETag when the entity is @Version-ed.
        applyVersioning(object.getClass(), originalItem, options);

        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .upsertItem(originalItem, options)
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null);
                return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem()));
            })
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to upsert item", throwable,
                    this.responseDiagnosticsProcessor));
    }

    /**
     * Deletes the item with id and partition key.
     *
     * @param containerName Container name of database
     * @param id item id
     * @param partitionKey the partition key
     */
    @Override
    public Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey) {
        return deleteById(containerName, id, partitionKey, new CosmosItemRequestOptions());
    }

    /** Internal delete-by-id with caller-supplied request options. */
    private Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey,
                                  CosmosItemRequestOptions cosmosItemRequestOptions) {
        Assert.hasText(containerName, "container name should not be null, empty or only whitespaces");
        String idToDelete = CosmosUtils.getStringIDValue(id);

        if (partitionKey == null) {
            // Items stored without a partition key value live under PartitionKey.NONE.
            partitionKey = PartitionKey.NONE;
        }

        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .deleteItem(idToDelete, partitionKey, cosmosItemRequestOptions)
            .publishOn(Schedulers.parallel())
            .doOnNext(cosmosItemResponse ->
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable,
                    this.responseDiagnosticsProcessor))
            .then();
    }

    /**
     * Deletes the entity
     *
     * @param <T> type class of domain type
     * @param containerName Container name of database
     * @param entity the entity to delete
     * @return void Mono
     */
    public <T> Mono<Void> deleteEntity(String containerName, T entity) {
        Assert.notNull(entity, "entity to be deleted should not be null");
        @SuppressWarnings("unchecked")
        final Class<T> domainType = (Class<T>) entity.getClass();
        final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(entity);
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        applyVersioning(entity.getClass(), originalItem, options);
        return deleteItem(originalItem, containerName, domainType).then();
    }

    /**
     * Delete all items in a container
     *
     * @param containerName the container name
     * @param domainType the domainType
     * @return void Mono
     */
    @Override
    public Mono<Void> deleteAll(@NonNull String containerName, @NonNull Class<?> domainType) {
        Assert.hasText(containerName, "container name should not be null, empty or only whitespaces");
        final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL));
        return this.delete(query, domainType, containerName).then();
    }

    /**
     * Delete items matching query
     *
     * @param query the document query
     * @param domainType the entity class
     * @param containerName the container name
     * @return Flux of the deleted items, converted back to the domain type
     */
    @Override
    public <T> Flux<T> delete(CosmosQuery query, Class<T> domainType, String containerName) {
        Assert.notNull(query, "DocumentQuery should not be null.");
        Assert.notNull(domainType, "domainType should not be null.");
        Assert.hasText(containerName, "container name should not be null, empty or only whitespaces");

        // Query first, then delete each matching document individually.
        final Flux<JsonNode> results = findItems(query, containerName, domainType);
        return results.flatMap(d -> deleteItem(d, containerName, domainType));
    }

    /**
     * Find items
     *
     * @param query the document query
     * @param domainType the entity class
     * @param containerName the container name
     * @return Flux with found items or error
     */
    @Override
    public <T> Flux<T> find(CosmosQuery query, Class<T> domainType, String containerName) {
        return findItems(query, containerName, domainType)
            .map(cosmosItemProperties ->
                emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItemProperties));
    }

    /**
     * Exists
     *
     * @param query the document query
     * @param domainType the entity class
     * @param containerName the container name
     * @return Mono with a boolean or error
     */
    @Override
    public Mono<Boolean> exists(CosmosQuery query, Class<?> domainType, String containerName) {
        return count(query, containerName).flatMap(count -> Mono.just(count > 0));
    }

    /**
     * Exists
     *
     * @param id the id
     * @param domainType the entity class
     * @param containerName the container name
     * @return Mono with a boolean or error
     */
    public Mono<Boolean> existsById(Object id, Class<?> domainType, String containerName) {
        // NOTE(review): when the id is not found, findById completes empty, so this
        // Mono completes EMPTY rather than emitting false — callers appear to rely on
        // that (e.g. via defaultIfEmpty); confirm before changing.
        return findById(containerName, id, domainType)
            .flatMap(o -> Mono.just(o != null));
    }

    /**
     * Count
     *
     * @param containerName the container name
     * @return Mono with the count or error
     */
    @Override
    public Mono<Long> count(String containerName) {
        final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL));
        return count(query, containerName);
    }

    /**
     * Count
     *
     * @param query the document query
     * @param containerName the container name
     * @return Mono with count or error
     */
    @Override
    public Mono<Long> count(CosmosQuery query, String containerName) {
        final SqlQuerySpec querySpec = new CountQueryGenerator().generateCosmos(query);
        return getCountValue(querySpec, containerName);
    }

    /**
     * Count
     *
     * @param querySpec the document query spec
     * @param containerName the container name
     * @return Mono with count or error
     */
    @Override
    public Mono<Long> count(SqlQuerySpec querySpec, String containerName) {
        return getCountValue(querySpec, containerName);
    }

    @Override
    public MappingCosmosConverter getConverter() {
        return mappingCosmosConverter;
    }

    @Override
    public <T> Flux<T> runQuery(SqlQuerySpec querySpec, Class<?> domainType, Class<T> returnType) {
        return runQuery(querySpec, Sort.unsorted(), domainType, returnType);
    }

    @Override
    public <T> Flux<T> runQuery(SqlQuerySpec querySpec, Sort sort, Class<?> domainType, Class<T> returnType) {
        SqlQuerySpec sortedQuerySpec = NativeQueryGenerator.getInstance().generateSortedQuery(querySpec, sort);
        return runQuery(sortedQuerySpec, domainType)
            .map(cosmosItemProperties ->
                emitOnLoadEventAndConvertToDomainObject(returnType, cosmosItemProperties));
    }

    /** Executes a raw query against the container derived from {@code domainType}. */
    private Flux<JsonNode> runQuery(SqlQuerySpec querySpec, Class<?> domainType) {
        String containerName = getContainerName(domainType);
        // NOTE(review): unlike the other query paths, this one does not set
        // setQueryMetricsEnabled — possibly an oversight; behavior preserved.
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems(querySpec, options, JsonNode.class)
            .byPage()
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemFeedResponse -> {
                CosmosUtils
                    .fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                        cosmosItemFeedResponse.getCosmosDiagnostics(),
                        cosmosItemFeedResponse);
                return Flux.fromIterable(cosmosItemFeedResponse.getResults());
            })
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable,
                    this.responseDiagnosticsProcessor));
    }

    /** Runs a COUNT query and extracts the single scalar result from the first page. */
    private Mono<Long> getCountValue(SqlQuerySpec querySpec, String containerName) {
        final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setQueryMetricsEnabled(this.queryMetricsEnabled);

        return executeQuery(querySpec, containerName, options)
            .doOnNext(feedResponse ->
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    feedResponse.getCosmosDiagnostics(), feedResponse))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to get count value", throwable,
                    this.responseDiagnosticsProcessor))
            .next()
            .map(r -> r.getResults().get(0).asLong());
    }

    /** Issues a paged query and surfaces each page as a FeedResponse. */
    private Flux<FeedResponse<JsonNode>> executeQuery(SqlQuerySpec sqlQuerySpec, String containerName,
                                                      CosmosQueryRequestOptions options) {
        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems(sqlQuerySpec, options, JsonNode.class)
            .byPage()
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to execute query", throwable,
                    this.responseDiagnosticsProcessor));
    }

    /**
     * Delete container with container name
     *
     * @param containerName the container name
     */
    @Override
    public void deleteContainer(@NonNull String containerName) {
        Assert.hasText(containerName, "containerName should have text.");
        // Intentionally blocking: this is an administrative, non-reactive operation.
        cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .delete()
            .doOnNext(cosmosContainerResponse ->
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosContainerResponse.getDiagnostics(), null))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to delete container", throwable,
                    this.responseDiagnosticsProcessor))
            .block();
    }

    /**
     * @param domainType the domain class
     * @return the container name
     */
    public String getContainerName(Class<?> domainType) {
        Assert.notNull(domainType, "domainType should not be null");
        return CosmosEntityInformation.getInstance(domainType).getContainerName();
    }

    /** Applies auditing metadata when an auditing handler was configured. */
    private void markAuditedIfConfigured(Object object) {
        if (cosmosAuditingHandler != null) {
            cosmosAuditingHandler.markAudited(object);
        }
    }

    /** Core query execution shared by find/delete: generates SQL, scopes to a partition when possible. */
    private <T> Flux<JsonNode> findItems(@NonNull CosmosQuery query,
                                         @NonNull String containerName,
                                         @NonNull Class<T> domainType) {
        final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(query);
        final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled);
        // Scope the query to a single partition when the criteria pin the partition key.
        Optional<Object> partitionKeyValue = query.getPartitionKeyValue(domainType);
        partitionKeyValue.ifPresent(o -> {
            LOGGER.debug("Setting partition key {}", o);
            cosmosQueryRequestOptions.setPartitionKey(new PartitionKey(o));
        });

        return cosmosAsyncClient
            .getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems(sqlQuerySpec, cosmosQueryRequestOptions, JsonNode.class)
            .byPage()
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemFeedResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse);
                return Flux.fromIterable(cosmosItemFeedResponse.getResults());
            })
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to query items", throwable,
                    this.responseDiagnosticsProcessor));
    }

    /** Deletes a single serialized document, honoring @Version via If-Match, and returns it as a domain object. */
    private <T> Mono<T> deleteItem(@NonNull JsonNode jsonNode, String containerName,
                                   @NonNull Class<T> domainType) {
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        applyVersioning(domainType, jsonNode, options);

        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .deleteItem(jsonNode, options)
            .publishOn(Schedulers.parallel())
            .map(cosmosItemResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null);
                return cosmosItemResponse;
            })
            .flatMap(objectCosmosItemResponse -> Mono.just(toDomainObject(domainType, jsonNode)))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable,
                    this.responseDiagnosticsProcessor));
    }

    /** Publishes an AfterLoadEvent for the raw JSON, then converts it to the domain type. */
    private <T> T emitOnLoadEventAndConvertToDomainObject(@NonNull Class<T> domainType, JsonNode responseJsonNode) {
        CosmosEntityInformation<?, ?> entityInformation = CosmosEntityInformation.getInstance(domainType);
        maybeEmitEvent(new AfterLoadEvent<>(responseJsonNode, domainType, entityInformation.getContainerName()));
        return toDomainObject(domainType, responseJsonNode);
    }

    /** Converts a raw JSON document to the given domain type via the configured converter. */
    private <T> T toDomainObject(@NonNull Class<T> domainType, JsonNode jsonNode) {
        return mappingCosmosConverter.read(domainType, jsonNode);
    }

    /** Sets the If-Match ETag precondition when the entity uses optimistic locking. */
    private void applyVersioning(Class<?> domainType, JsonNode jsonNode, CosmosItemRequestOptions options) {
        CosmosEntityInformation<?, ?> entityInformation = CosmosEntityInformation.getInstance(domainType);
        if (entityInformation.isVersioned()) {
            options.setIfMatchETag(jsonNode.get(Constants.ETAG_PROPERTY_DEFAULT_NAME).asText());
        }
    }

    /** Publishes a mapping event only when an ApplicationContext has been wired in. */
    private void maybeEmitEvent(CosmosMappingEvent<?> event) {
        if (canPublishEvent()) {
            this.applicationContext.publishEvent(event);
        }
    }

    private boolean canPublishEvent() {
        return this.applicationContext != null;
    }
}
/**
 * Template implementing {@link ReactiveCosmosOperations}: reactive CRUD, query,
 * count and container-management operations against an Azure Cosmos DB database.
 * All data-plane calls surface diagnostics through the configured
 * {@link ResponseDiagnosticsProcessor} and translate SDK failures via
 * {@code CosmosExceptionUtils}.
 */
class ReactiveCosmosTemplate implements ReactiveCosmosOperations, ApplicationContextAware {

    private static final Logger LOGGER = LoggerFactory.getLogger(ReactiveCosmosTemplate.class);

    private final MappingCosmosConverter mappingCosmosConverter;
    private final String databaseName;
    private final ResponseDiagnosticsProcessor responseDiagnosticsProcessor;
    private final boolean queryMetricsEnabled;
    private final CosmosAsyncClient cosmosAsyncClient;
    private final IsNewAwareAuditingHandler cosmosAuditingHandler;
    private final DatabaseThroughputConfig databaseThroughputConfig;

    private ApplicationContext applicationContext;

    /**
     * Initialization
     *
     * @param client must not be {@literal null}
     * @param databaseName must not be {@literal null}
     * @param cosmosConfig must not be {@literal null}
     * @param mappingCosmosConverter must not be {@literal null}
     * @param cosmosAuditingHandler can be {@literal null}
     */
    public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName,
                                  CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter,
                                  IsNewAwareAuditingHandler cosmosAuditingHandler) {
        this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, cosmosAuditingHandler);
    }

    /**
     * Initialization
     *
     * @param client must not be {@literal null}
     * @param databaseName must not be {@literal null}
     * @param cosmosConfig must not be {@literal null}
     * @param mappingCosmosConverter must not be {@literal null}
     */
    public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName,
                                  CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter) {
        this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, null);
    }

    /**
     * Constructor
     *
     * @param cosmosFactory the cosmos db factory
     * @param cosmosConfig the cosmos config
     * @param mappingCosmosConverter the mappingCosmosConverter
     * @param cosmosAuditingHandler the auditing handler
     */
    public ReactiveCosmosTemplate(CosmosFactory cosmosFactory,
                                  CosmosConfig cosmosConfig,
                                  MappingCosmosConverter mappingCosmosConverter,
                                  IsNewAwareAuditingHandler cosmosAuditingHandler) {
        Assert.notNull(cosmosFactory, "CosmosFactory must not be null!");
        Assert.notNull(cosmosConfig, "CosmosConfig must not be null!");
        Assert.notNull(mappingCosmosConverter, "MappingCosmosConverter must not be null!");

        this.mappingCosmosConverter = mappingCosmosConverter;
        this.cosmosAsyncClient = cosmosFactory.getCosmosAsyncClient();
        this.databaseName = cosmosFactory.getDatabaseName();
        this.responseDiagnosticsProcessor = cosmosConfig.getResponseDiagnosticsProcessor();
        this.queryMetricsEnabled = cosmosConfig.isQueryMetricsEnabled();
        this.cosmosAuditingHandler = cosmosAuditingHandler;
        this.databaseThroughputConfig = cosmosConfig.getDatabaseThroughputConfig();
    }

    /**
     * Initialization
     *
     * @param cosmosFactory must not be {@literal null}
     * @param cosmosConfig must not be {@literal null}
     * @param mappingCosmosConverter must not be {@literal null}
     */
    public ReactiveCosmosTemplate(CosmosFactory cosmosFactory, CosmosConfig cosmosConfig,
                                  MappingCosmosConverter mappingCosmosConverter) {
        this(cosmosFactory, cosmosConfig, mappingCosmosConverter, null);
    }

    /**
     * @param applicationContext the application context
     * @throws BeansException the bean exception
     */
    public void setApplicationContext(@NonNull ApplicationContext applicationContext) throws BeansException {
        this.applicationContext = applicationContext;
    }

    // NOTE(review): a javadoc block here previously described a public
    // "createContainerIfNotExists(CosmosEntityInformation)" method (it documented an
    // "information" parameter and a Mono<CosmosContainerResponse> return) and carried an
    // @Override annotation, but the annotation was attached to the PRIVATE method below,
    // which overrides nothing and therefore does not compile. The invalid @Override has
    // been removed — confirm the container-creation override was not accidentally dropped.

    /**
     * Creates the database if it doesn't already exist, applying the configured
     * database throughput (autoscale or manual) when one is provided.
     *
     * @return Mono containing the CosmosDatabaseResponse
     */
    private Mono<CosmosDatabaseResponse> createDatabaseIfNotExists() {
        if (databaseThroughputConfig == null) {
            return cosmosAsyncClient
                .createDatabaseIfNotExists(this.databaseName);
        } else {
            ThroughputProperties throughputProperties = databaseThroughputConfig.isAutoScale()
                ? ThroughputProperties.createAutoscaledThroughput(databaseThroughputConfig.getRequestUnits())
                : ThroughputProperties.createManualThroughput(databaseThroughputConfig.getRequestUnits());
            return cosmosAsyncClient
                .createDatabaseIfNotExists(this.databaseName, throughputProperties);
        }
    }

    @Override
    public Mono<CosmosContainerProperties> getContainerProperties(String containerName) {
        return cosmosAsyncClient.getDatabase(this.databaseName)
                                .getContainer(containerName)
                                .read()
                                .map(CosmosContainerResponse::getProperties);
    }

    @Override
    public Mono<CosmosContainerProperties> replaceContainerProperties(String containerName,
                                                                      CosmosContainerProperties properties) {
        return this.cosmosAsyncClient.getDatabase(this.databaseName)
                                     .getContainer(containerName)
                                     .replace(properties)
                                     .map(CosmosContainerResponse::getProperties);
    }

    /**
     * Find all items in a given container
     *
     * @param containerName the containerName
     * @param domainType the domainType
     * @return Flux with all the found items or error
     */
    @Override
    public <T> Flux<T> findAll(String containerName, Class<T> domainType) {
        final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL));
        return find(query, domainType, containerName);
    }

    /**
     * Find all items in a given container
     *
     * @param domainType the domainType
     * @return Flux with all the found items or error
     */
    @Override
    public <T> Flux<T> findAll(Class<T> domainType) {
        // NOTE(review): uses the simple class name rather than getContainerName(domainType);
        // confirm this matches the annotated container name for entities that customize it.
        return findAll(domainType.getSimpleName(), domainType);
    }

    @Override
    public <T> Flux<T> findAll(PartitionKey partitionKey, Class<T> domainType) {
        Assert.notNull(partitionKey, "partitionKey should not be null");
        Assert.notNull(domainType, "domainType should not be null");

        final String containerName = getContainerName(domainType);

        final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        cosmosQueryRequestOptions.setPartitionKey(partitionKey);
        cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled);

        return cosmosAsyncClient
            .getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems("SELECT * FROM r", cosmosQueryRequestOptions, JsonNode.class)
            .byPage()
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemFeedResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse);
                return Flux.fromIterable(cosmosItemFeedResponse.getResults());
            })
            .map(cosmosItemProperties -> emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItemProperties))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable,
                    this.responseDiagnosticsProcessor));
    }

    /**
     * Find by id
     *
     * @param id the id
     * @param domainType the domainType
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> findById(Object id, Class<T> domainType) {
        Assert.notNull(domainType, "domainType should not be null");
        return findById(getContainerName(domainType), id, domainType);
    }

    /**
     * Find by id
     *
     * @param containerName the container name
     * @param id the id
     * @param domainType the entity class
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> findById(String containerName, Object id, Class<T> domainType) {
        Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces");
        Assert.notNull(domainType, "domainType should not be null");
        final String query = "select * from root where root.id = @ROOT_ID";
        final SqlParameter param = new SqlParameter("@ROOT_ID", CosmosUtils.getStringIDValue(id));
        final SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(query, param);
        final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setQueryMetricsEnabled(this.queryMetricsEnabled);

        return cosmosAsyncClient.getDatabase(this.databaseName)
                                .getContainer(containerName)
                                .queryItems(sqlQuerySpec, options, JsonNode.class)
                                .byPage()
                                .publishOn(Schedulers.parallel())
                                .flatMap(cosmosItemFeedResponse -> {
                                    CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                                        cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse);
                                    return Mono.justOrEmpty(cosmosItemFeedResponse
                                        .getResults()
                                        .stream()
                                        .map(cosmosItem -> emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItem))
                                        .findFirst());
                                })
                                .onErrorResume(throwable ->
                                    CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable,
                                        this.responseDiagnosticsProcessor))
                                .next();
    }

    /**
     * Find by id
     *
     * @param id the id
     * @param domainType the entity class
     * @param partitionKey partition Key
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> findById(Object id, Class<T> domainType, PartitionKey partitionKey) {
        Assert.notNull(domainType, "domainType should not be null");
        String idToFind = CosmosUtils.getStringIDValue(id);

        final String containerName = getContainerName(domainType);
        return cosmosAsyncClient.getDatabase(this.databaseName)
                                .getContainer(containerName)
                                .readItem(idToFind, partitionKey, JsonNode.class)
                                .publishOn(Schedulers.parallel())
                                .flatMap(cosmosItemResponse -> {
                                    CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                                        cosmosItemResponse.getDiagnostics(), null);
                                    return Mono.justOrEmpty(emitOnLoadEventAndConvertToDomainObject(domainType,
                                        cosmosItemResponse.getItem()));
                                })
                                .onErrorResume(throwable ->
                                    CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable,
                                        this.responseDiagnosticsProcessor));
    }

    /**
     * Insert
     *
     * @param <T> type of inserted objectToSave
     * @param objectToSave the object to save
     * @param partitionKey the partition key
     * @return Mono with the item or error
     */
    public <T> Mono<T> insert(T objectToSave, PartitionKey partitionKey) {
        return insert(getContainerName(objectToSave.getClass()), objectToSave, partitionKey);
    }

    /**
     * Insert
     *
     * @param objectToSave the object to save
     * @param <T> type of inserted objectToSave
     * @return Mono with the item or error
     */
    public <T> Mono<T> insert(T objectToSave) {
        return insert(getContainerName(objectToSave.getClass()), objectToSave, null);
    }

    /**
     * Insert
     *
     * @param <T> type of inserted objectToSave
     * @param containerName the container name
     * @param objectToSave the object to save
     * @param partitionKey the partition key
     * @return Mono with the item or error
     */
    public <T> Mono<T> insert(String containerName, Object objectToSave, PartitionKey partitionKey) {
        Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces");
        Assert.notNull(objectToSave, "objectToSave should not be null");

        final Class<T> domainType = (Class<T>) objectToSave.getClass();
        markAuditedIfConfigured(objectToSave);
        generateIdIfNullAndAutoGenerationEnabled(objectToSave, domainType);
        final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(objectToSave);
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        return cosmosAsyncClient
            .getDatabase(this.databaseName)
            .getContainer(containerName)
            .createItem(originalItem, partitionKey, options)
            .publishOn(Schedulers.parallel())
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to insert item", throwable,
                    this.responseDiagnosticsProcessor))
            .flatMap(cosmosItemResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null);
                return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem()));
            });
    }

    /**
     * Insert
     *
     * @param <T> type of inserted objectToSave
     * @param containerName the container name
     * @param objectToSave the object to save
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> insert(String containerName, T objectToSave) {
        return insert(containerName, objectToSave, null);
    }

    @SuppressWarnings("unchecked")
    private <T> void generateIdIfNullAndAutoGenerationEnabled(T originalItem, Class<?> type) {
        CosmosEntityInformation<?, ?> entityInfo = CosmosEntityInformation.getInstance(type);
        if (entityInfo.shouldGenerateId()
            && ReflectionUtils.getField(entityInfo.getIdField(), originalItem) == null) {
            ReflectionUtils.setField(entityInfo.getIdField(), originalItem, UUID.randomUUID().toString());
        }
    }

    /**
     * Upsert
     *
     * @param object the object to upsert
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> upsert(T object) {
        return upsert(getContainerName(object.getClass()), object);
    }

    /**
     * Upsert
     *
     * @param containerName the container name
     * @param object the object to save
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> upsert(String containerName, T object) {
        final Class<T> domainType = (Class<T>) object.getClass();
        markAuditedIfConfigured(object);
        final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(object);
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        // Honors @Version fields via the if-match etag for optimistic concurrency.
        applyVersioning(object.getClass(), originalItem, options);

        return cosmosAsyncClient.getDatabase(this.databaseName)
                                .getContainer(containerName)
                                .upsertItem(originalItem, options)
                                .publishOn(Schedulers.parallel())
                                .flatMap(cosmosItemResponse -> {
                                    CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                                        cosmosItemResponse.getDiagnostics(), null);
                                    return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem()));
                                })
                                .onErrorResume(throwable ->
                                    CosmosExceptionUtils.exceptionHandler("Failed to upsert item", throwable,
                                        this.responseDiagnosticsProcessor));
    }

    /**
     * Deletes the item with id and partition key.
     *
     * @param containerName Container name of database
     * @param id item id
     * @param partitionKey the partition key
     */
    @Override
    public Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey) {
        return deleteById(containerName, id, partitionKey, new CosmosItemRequestOptions());
    }

    private Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey,
                                  CosmosItemRequestOptions cosmosItemRequestOptions) {
        Assert.hasText(containerName, "container name should not be null, empty or only whitespaces");
        String idToDelete = CosmosUtils.getStringIDValue(id);

        if (partitionKey == null) {
            partitionKey = PartitionKey.NONE;
        }

        return cosmosAsyncClient.getDatabase(this.databaseName)
                                .getContainer(containerName)
                                .deleteItem(idToDelete, partitionKey, cosmosItemRequestOptions)
                                .publishOn(Schedulers.parallel())
                                .doOnNext(cosmosItemResponse ->
                                    CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                                        cosmosItemResponse.getDiagnostics(), null))
                                .onErrorResume(throwable ->
                                    CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable,
                                        this.responseDiagnosticsProcessor))
                                .then();
    }

    /**
     * Deletes the entity
     *
     * @param <T> type class of domain type
     * @param containerName Container name of database
     * @param entity the entity to delete
     * @return void Mono
     */
    public <T> Mono<Void> deleteEntity(String containerName, T entity) {
        Assert.notNull(entity, "entity to be deleted should not be null");
        @SuppressWarnings("unchecked")
        final Class<T> domainType = (Class<T>) entity.getClass();
        final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(entity);
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        applyVersioning(entity.getClass(), originalItem, options);
        return deleteItem(originalItem, containerName, domainType).then();
    }

    /**
     * Delete all items in a container
     *
     * @param containerName the container name
     * @param domainType the domainType
     * @return void Mono
     */
    @Override
    public Mono<Void> deleteAll(@NonNull String containerName, @NonNull Class<?> domainType) {
        Assert.hasText(containerName, "container name should not be null, empty or only whitespaces");

        final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL));

        return this.delete(query, domainType, containerName).then();
    }

    /**
     * Delete items matching query
     *
     * @param query the document query
     * @param domainType the entity class
     * @param containerName the container name
     * @return Mono
     */
    @Override
    public <T> Flux<T> delete(CosmosQuery query, Class<T> domainType, String containerName) {
        Assert.notNull(query, "DocumentQuery should not be null.");
        Assert.notNull(domainType, "domainType should not be null.");
        Assert.hasText(containerName, "container name should not be null, empty or only whitespaces");

        final Flux<JsonNode> results = findItems(query, containerName, domainType);

        return results.flatMap(d -> deleteItem(d, containerName, domainType));
    }

    /**
     * Find items
     *
     * @param query the document query
     * @param domainType the entity class
     * @param containerName the container name
     * @return Flux with found items or error
     */
    @Override
    public <T> Flux<T> find(CosmosQuery query, Class<T> domainType, String containerName) {
        return findItems(query, containerName, domainType)
            .map(cosmosItemProperties -> emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItemProperties));
    }

    /**
     * Exists
     *
     * @param query the document query
     * @param domainType the entity class
     * @param containerName the container name
     * @return Mono with a boolean or error
     */
    @Override
    public Mono<Boolean> exists(CosmosQuery query, Class<?> domainType, String containerName) {
        return count(query, containerName).flatMap(count -> Mono.just(count > 0));
    }

    /**
     * Exists
     *
     * @param id the id
     * @param domainType the entity class
     * @param containerName the container name
     * @return Mono with a boolean or error
     */
    public Mono<Boolean> existsById(Object id, Class<?> domainType, String containerName) {
        // NOTE(review): findById completes empty when the item is absent, so this Mono
        // completes empty rather than emitting false — confirm callers handle that
        // (Mono.hasElement() would emit false explicitly).
        return findById(containerName, id, domainType)
            .flatMap(o -> Mono.just(o != null));
    }

    /**
     * Count
     *
     * @param containerName the container name
     * @return Mono with the count or error
     */
    @Override
    public Mono<Long> count(String containerName) {
        final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL));
        return count(query, containerName);
    }

    /**
     * Count
     *
     * @param query the document query
     * @param containerName the container name
     * @return Mono with count or error
     */
    @Override
    public Mono<Long> count(CosmosQuery query, String containerName) {
        final SqlQuerySpec querySpec = new CountQueryGenerator().generateCosmos(query);
        return getCountValue(querySpec, containerName);
    }

    /**
     * Count
     *
     * @param querySpec the document query spec
     * @param containerName the container name
     * @return Mono with count or error
     */
    @Override
    public Mono<Long> count(SqlQuerySpec querySpec, String containerName) {
        return getCountValue(querySpec, containerName);
    }

    @Override
    public MappingCosmosConverter getConverter() {
        return mappingCosmosConverter;
    }

    @Override
    public <T> Flux<T> runQuery(SqlQuerySpec querySpec, Class<?> domainType, Class<T> returnType) {
        return runQuery(querySpec, Sort.unsorted(), domainType, returnType);
    }

    @Override
    public <T> Flux<T> runQuery(SqlQuerySpec querySpec, Sort sort, Class<?> domainType, Class<T> returnType) {
        SqlQuerySpec sortedQuerySpec = NativeQueryGenerator.getInstance().generateSortedQuery(querySpec, sort);
        return runQuery(sortedQuerySpec, domainType)
            .map(cosmosItemProperties -> emitOnLoadEventAndConvertToDomainObject(returnType, cosmosItemProperties));
    }

    private Flux<JsonNode> runQuery(SqlQuerySpec querySpec, Class<?> domainType) {
        String containerName = getContainerName(domainType);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        return cosmosAsyncClient.getDatabase(this.databaseName)
                                .getContainer(containerName)
                                .queryItems(querySpec, options, JsonNode.class)
                                .byPage()
                                .publishOn(Schedulers.parallel())
                                .flatMap(cosmosItemFeedResponse -> {
                                    CosmosUtils
                                        .fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                                            cosmosItemFeedResponse.getCosmosDiagnostics(),
                                            cosmosItemFeedResponse);
                                    return Flux.fromIterable(cosmosItemFeedResponse.getResults());
                                })
                                .onErrorResume(throwable ->
                                    CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable,
                                        this.responseDiagnosticsProcessor));
    }

    private Mono<Long> getCountValue(SqlQuerySpec querySpec, String containerName) {
        final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setQueryMetricsEnabled(this.queryMetricsEnabled);

        return executeQuery(querySpec, containerName, options)
            .doOnNext(feedResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                feedResponse.getCosmosDiagnostics(), feedResponse))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to get count value", throwable,
                    this.responseDiagnosticsProcessor))
            .next()
            .map(r -> r.getResults().get(0).asLong());
    }

    private Flux<FeedResponse<JsonNode>> executeQuery(SqlQuerySpec sqlQuerySpec, String containerName,
                                                      CosmosQueryRequestOptions options) {
        return cosmosAsyncClient.getDatabase(this.databaseName)
                                .getContainer(containerName)
                                .queryItems(sqlQuerySpec, options, JsonNode.class)
                                .byPage()
                                .onErrorResume(throwable ->
                                    CosmosExceptionUtils.exceptionHandler("Failed to execute query", throwable,
                                        this.responseDiagnosticsProcessor));
    }

    /**
     * Delete container with container name
     *
     * @param containerName the container name
     */
    @Override
    public void deleteContainer(@NonNull String containerName) {
        Assert.hasText(containerName, "containerName should have text.");
        cosmosAsyncClient.getDatabase(this.databaseName)
                         .getContainer(containerName)
                         .delete()
                         .doOnNext(cosmosContainerResponse ->
                             CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                                 cosmosContainerResponse.getDiagnostics(), null))
                         .onErrorResume(throwable ->
                             CosmosExceptionUtils.exceptionHandler("Failed to delete container", throwable,
                                 this.responseDiagnosticsProcessor))
                         .block();
    }

    /**
     * @param domainType the domain class
     * @return the container name
     */
    public String getContainerName(Class<?> domainType) {
        Assert.notNull(domainType, "domainType should not be null");
        return CosmosEntityInformation.getInstance(domainType).getContainerName();
    }

    private void markAuditedIfConfigured(Object object) {
        if (cosmosAuditingHandler != null) {
            cosmosAuditingHandler.markAudited(object);
        }
    }

    private <T> Flux<JsonNode> findItems(@NonNull CosmosQuery query,
                                         @NonNull String containerName,
                                         @NonNull Class<T> domainType) {
        final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(query);
        final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled);
        Optional<Object> partitionKeyValue = query.getPartitionKeyValue(domainType);
        partitionKeyValue.ifPresent(o -> {
            LOGGER.debug("Setting partition key {}", o);
            cosmosQueryRequestOptions.setPartitionKey(new PartitionKey(o));
        });

        return cosmosAsyncClient
            .getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems(sqlQuerySpec, cosmosQueryRequestOptions, JsonNode.class)
            .byPage()
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemFeedResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemFeedResponse.getCosmosDiagnostics(),
                    cosmosItemFeedResponse);
                return Flux.fromIterable(cosmosItemFeedResponse.getResults());
            })
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to query items", throwable,
                    this.responseDiagnosticsProcessor));
    }

    private <T> Mono<T> deleteItem(@NonNull JsonNode jsonNode,
                                   String containerName,
                                   @NonNull Class<T> domainType) {
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        applyVersioning(domainType, jsonNode, options);

        return cosmosAsyncClient.getDatabase(this.databaseName)
                                .getContainer(containerName)
                                .deleteItem(jsonNode, options)
                                .publishOn(Schedulers.parallel())
                                .map(cosmosItemResponse -> {
                                    CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                                        cosmosItemResponse.getDiagnostics(), null);
                                    return cosmosItemResponse;
                                })
                                .flatMap(objectCosmosItemResponse -> Mono.just(toDomainObject(domainType, jsonNode)))
                                .onErrorResume(throwable ->
                                    CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable,
                                        this.responseDiagnosticsProcessor));
    }

    private <T> T emitOnLoadEventAndConvertToDomainObject(@NonNull Class<T> domainType, JsonNode responseJsonNode) {
        CosmosEntityInformation<?, ?> entityInformation = CosmosEntityInformation.getInstance(domainType);
        maybeEmitEvent(new AfterLoadEvent<>(responseJsonNode, domainType, entityInformation.getContainerName()));
        return toDomainObject(domainType, responseJsonNode);
    }

    private <T> T toDomainObject(@NonNull Class<T> domainType, JsonNode jsonNode) {
        return mappingCosmosConverter.read(domainType, jsonNode);
    }

    private void applyVersioning(Class<?> domainType, JsonNode jsonNode, CosmosItemRequestOptions options) {
        CosmosEntityInformation<?, ?> entityInformation = CosmosEntityInformation.getInstance(domainType);
        if (entityInformation.isVersioned()) {
            options.setIfMatchETag(jsonNode.get(Constants.ETAG_PROPERTY_DEFAULT_NAME).asText());
        }
    }

    private void maybeEmitEvent(CosmosMappingEvent<?> event) {
        if (canPublishEvent()) {
            this.applicationContext.publishEvent(event);
        }
    }

    private boolean canPublishEvent() {
        return this.applicationContext != null;
    }
}
What if the customer has already defined a unique key policy on the container? Will this override what is already defined? (Just thinking about whether it will cause the same problem that happened with the indexing policy some time back: https://github.com/Azure/azure-sdk-for-java/issues/20330)
/**
 * Creates the container described by the given entity metadata if it does not
 * already exist, creating the database first when necessary. TTL, indexing
 * policy, unique key policy and throughput are all taken from the entity
 * information. Blocks until the container exists.
 *
 * @param information the CosmosEntityInformation describing the container
 * @return the properties of the (possibly pre-existing) container
 */
public CosmosContainerProperties createContainerIfNotExists(CosmosEntityInformation<?, ?> information) {
    final CosmosContainerResponse containerResponse = createDatabaseIfNotExists()
        .publishOn(Schedulers.parallel())
        .onErrorResume(throwable ->
            CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable,
                this.responseDiagnosticsProcessor))
        .flatMap(databaseResponse -> {
            CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                databaseResponse.getDiagnostics(), null);

            // Build the container definition from the entity metadata.
            final CosmosContainerProperties containerProperties =
                new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath());
            containerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive());
            containerProperties.setIndexingPolicy(information.getIndexingPolicy());
            final UniqueKeyPolicy entityUniqueKeyPolicy = information.getUniqueKeyPolicy();
            // Only apply a unique key policy when the entity declares one.
            if (entityUniqueKeyPolicy != null) {
                containerProperties.setUniqueKeyPolicy(entityUniqueKeyPolicy);
            }

            CosmosAsyncDatabase targetDatabase = cosmosAsyncClient
                .getDatabase(databaseResponse.getProperties().getId());

            Mono<CosmosContainerResponse> createContainerMono;
            if (information.getRequestUnit() == null) {
                // No dedicated throughput requested for this container.
                createContainerMono =
                    targetDatabase.createContainerIfNotExists(containerProperties);
            } else {
                ThroughputProperties throughput;
                if (information.isAutoScale()) {
                    throughput = ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit());
                } else {
                    throughput = ThroughputProperties.createManualThroughput(information.getRequestUnit());
                }
                createContainerMono =
                    targetDatabase.createContainerIfNotExists(containerProperties, throughput);
            }

            return createContainerMono
                .onErrorResume(throwable ->
                    CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable,
                        this.responseDiagnosticsProcessor))
                .doOnNext(response ->
                    CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                        response.getDiagnostics(), null));
        })
        .block();
    assert containerResponse != null;
    return containerResponse.getProperties();
}
cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy);
/**
 * Creates the container described by the given entity metadata if it does not already
 * exist, first ensuring the database exists. TTL, indexing policy and (when declared)
 * the unique key policy come from the entity information; throughput is dedicated
 * (manual or autoscale) only when the entity specifies request units. Blocks on the
 * reactive chain before returning.
 *
 * @param information the CosmosEntityInformation describing the container to create
 * @return the properties of the created (or pre-existing) container
 */
public CosmosContainerProperties createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { final CosmosContainerResponse response = createDatabaseIfNotExists() .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); final UniqueKeyPolicy uniqueKeyPolicy = information.getUniqueKeyPolicy(); if (uniqueKeyPolicy != null) { cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy); } CosmosAsyncDatabase cosmosAsyncDatabase = cosmosAsyncClient .getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = information.isAutoScale() ?
ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit()) : ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable, this.responseDiagnosticsProcessor)) .doOnNext(cosmosContainerResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)); }) .block(); assert response != null; return response.getProperties(); }
class type of domain * @return found results in a List */ public <T> Iterable<T> findAll(String containerName, final Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return findItems(query, containerName, domainType); }
class type of domain * @return found results in a List */ public <T> Iterable<T> findAll(String containerName, final Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return findItems(query, containerName, domainType); }
If the customer has already defined a unique key policy, then they won't use the annotation. The annotation is supposed to take precedence over their existing unique key policy. That's why the null check is here :)
/**
 * Creates the container described by the entity metadata if it does not already exist.
 * The database is created first when missing; TTL and indexing policy always come from
 * the entity information, while the unique key policy is applied only when the entity
 * declares one (a null policy leaves any server-side policy untouched). Dedicated
 * throughput (autoscale or manual) is requested only when the entity specifies request
 * units. The reactive pipeline is blocked before returning.
 *
 * @param information the CosmosEntityInformation describing the container to create
 * @return the properties of the created (or pre-existing) container
 */
public CosmosContainerProperties createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { final CosmosContainerResponse response = createDatabaseIfNotExists() .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); final UniqueKeyPolicy uniqueKeyPolicy = information.getUniqueKeyPolicy(); if (uniqueKeyPolicy != null) { cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy); } CosmosAsyncDatabase cosmosAsyncDatabase = cosmosAsyncClient .getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = information.isAutoScale() ?
ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit()) : ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable, this.responseDiagnosticsProcessor)) .doOnNext(cosmosContainerResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)); }) .block(); assert response != null; return response.getProperties(); }
cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy);
// Creates the entity's container if absent (creating the database first when needed):
// builds CosmosContainerProperties from the entity's name, partition key path, TTL and
// indexing policy, applies the unique key policy only when non-null, chooses autoscale vs.
// manual throughput only when request units are declared, then blocks and returns the
// resulting container properties. Diagnostics are recorded for both database and container
// responses; errors are routed through CosmosExceptionUtils.exceptionHandler.
public CosmosContainerProperties createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { final CosmosContainerResponse response = createDatabaseIfNotExists() .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); final UniqueKeyPolicy uniqueKeyPolicy = information.getUniqueKeyPolicy(); if (uniqueKeyPolicy != null) { cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy); } CosmosAsyncDatabase cosmosAsyncDatabase = cosmosAsyncClient .getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = information.isAutoScale() ? 
ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit()) : ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable, this.responseDiagnosticsProcessor)) .doOnNext(cosmosContainerResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)); }) .block(); assert response != null; return response.getProperties(); }
class type of domain * @return found results in a List */ public <T> Iterable<T> findAll(String containerName, final Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return findItems(query, containerName, domainType); }
// Returns every item in the named container via a Criteria.ALL query. Validates that
// containerName has text and domainType is non-null before delegating to findItems.
class type of domain * @return found results in a List */ public <T> Iterable<T> findAll(String containerName, final Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return findItems(query, containerName, domainType); }
See PR https://github.com/Azure/azure-sdk-for-java/pull/22246, which fixed the indexing policy override. The same intention applies here: add a null check and only apply the policy if it is not null. If you look at `CosmosEntityInformation`, I am returning null when the unique key policy is not specified or is empty.
// Creates the entity's container if absent (creating the database first when needed):
// builds CosmosContainerProperties from the entity's name, partition key path, TTL and
// indexing policy, applies the unique key policy only when non-null, chooses autoscale vs.
// manual throughput only when request units are declared, then blocks and returns the
// resulting container properties. Diagnostics are recorded for both database and container
// responses; errors are routed through CosmosExceptionUtils.exceptionHandler.
public CosmosContainerProperties createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { final CosmosContainerResponse response = createDatabaseIfNotExists() .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); final UniqueKeyPolicy uniqueKeyPolicy = information.getUniqueKeyPolicy(); if (uniqueKeyPolicy != null) { cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy); } CosmosAsyncDatabase cosmosAsyncDatabase = cosmosAsyncClient .getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = information.isAutoScale() ? 
ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit()) : ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable, this.responseDiagnosticsProcessor)) .doOnNext(cosmosContainerResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)); }) .block(); assert response != null; return response.getProperties(); }
// Fragment: copies the entity's unique key policy onto the container properties; the call
// site guards this with a null check so an absent policy is never applied.
cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy);
// Creates the entity's container if absent (creating the database first when needed):
// builds CosmosContainerProperties from the entity's name, partition key path, TTL and
// indexing policy, applies the unique key policy only when non-null, chooses autoscale vs.
// manual throughput only when request units are declared, then blocks and returns the
// resulting container properties. Diagnostics are recorded for both database and container
// responses; errors are routed through CosmosExceptionUtils.exceptionHandler.
public CosmosContainerProperties createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { final CosmosContainerResponse response = createDatabaseIfNotExists() .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); final UniqueKeyPolicy uniqueKeyPolicy = information.getUniqueKeyPolicy(); if (uniqueKeyPolicy != null) { cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy); } CosmosAsyncDatabase cosmosAsyncDatabase = cosmosAsyncClient .getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = information.isAutoScale() ? 
ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit()) : ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable, this.responseDiagnosticsProcessor)) .doOnNext(cosmosContainerResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)); }) .block(); assert response != null; return response.getProperties(); }
// Returns every item in the named container via a Criteria.ALL query. Validates that
// containerName has text and domainType is non-null before delegating to findItems.
class type of domain * @return found results in a List */ public <T> Iterable<T> findAll(String containerName, final Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return findItems(query, containerName, domainType); }
// Returns every item in the named container via a Criteria.ALL query. Validates that
// containerName has text and domainType is non-null before delegating to findItems.
class type of domain * @return found results in a List */ public <T> Iterable<T> findAll(String containerName, final Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return findItems(query, containerName, domainType); }
gotcha, thanks for the explanation 👍
// Creates the entity's container if absent (creating the database first when needed):
// builds CosmosContainerProperties from the entity's name, partition key path, TTL and
// indexing policy, applies the unique key policy only when non-null, chooses autoscale vs.
// manual throughput only when request units are declared, then blocks and returns the
// resulting container properties. Diagnostics are recorded for both database and container
// responses; errors are routed through CosmosExceptionUtils.exceptionHandler.
public CosmosContainerProperties createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { final CosmosContainerResponse response = createDatabaseIfNotExists() .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); final UniqueKeyPolicy uniqueKeyPolicy = information.getUniqueKeyPolicy(); if (uniqueKeyPolicy != null) { cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy); } CosmosAsyncDatabase cosmosAsyncDatabase = cosmosAsyncClient .getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = information.isAutoScale() ? 
ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit()) : ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable, this.responseDiagnosticsProcessor)) .doOnNext(cosmosContainerResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)); }) .block(); assert response != null; return response.getProperties(); }
// Fragment: copies the entity's unique key policy onto the container properties; the call
// site guards this with a null check so an absent policy is never applied.
cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy);
// Creates the entity's container if absent (creating the database first when needed):
// builds CosmosContainerProperties from the entity's name, partition key path, TTL and
// indexing policy, applies the unique key policy only when non-null, chooses autoscale vs.
// manual throughput only when request units are declared, then blocks and returns the
// resulting container properties. Diagnostics are recorded for both database and container
// responses; errors are routed through CosmosExceptionUtils.exceptionHandler.
public CosmosContainerProperties createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { final CosmosContainerResponse response = createDatabaseIfNotExists() .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); final UniqueKeyPolicy uniqueKeyPolicy = information.getUniqueKeyPolicy(); if (uniqueKeyPolicy != null) { cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy); } CosmosAsyncDatabase cosmosAsyncDatabase = cosmosAsyncClient .getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = information.isAutoScale() ? 
ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit()) : ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable, this.responseDiagnosticsProcessor)) .doOnNext(cosmosContainerResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)); }) .block(); assert response != null; return response.getProperties(); }
// Returns every item in the named container via a Criteria.ALL query. Validates that
// containerName has text and domainType is non-null before delegating to findItems.
class type of domain * @return found results in a List */ public <T> Iterable<T> findAll(String containerName, final Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return findItems(query, containerName, domainType); }
// Returns every item in the named container via a Criteria.ALL query. Validates that
// containerName has text and domainType is non-null before delegating to findItems.
class type of domain * @return found results in a List */ public <T> Iterable<T> findAll(String containerName, final Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return findItems(query, containerName, domainType); }
I think this can be the value property of an enum, similar to KeyEncryptionKeyResolverName.
/**
 * Maps a {@link KeyEncryptionKeyAlgorithm} to the algorithm name understood by the
 * key-encryption-key wrap/unwrap APIs.
 *
 * @param keyEncryptionKeyAlgorithm the algorithm to translate; only RSA_OAEP is supported
 * @return the algorithm name for RSA_OAEP
 * @throws IllegalArgumentException if the algorithm is anything other than RSA_OAEP
 */
private static String getNameForKeyEncryptionKeyAlgorithm(KeyEncryptionKeyAlgorithm keyEncryptionKeyAlgorithm) {
    // Guard clause: reject every algorithm except the single supported one.
    if (keyEncryptionKeyAlgorithm != KeyEncryptionKeyAlgorithm.RSA_OAEP) {
        throw new IllegalArgumentException(
            String.format("Unexpected algorithm '%s'", keyEncryptionKeyAlgorithm));
    }
    return "RSA-OAEP";
}
// Fragment: the algorithm name returned for the RSA_OAEP key-encryption algorithm.
return "RSA-OAEP";
// Translates the KeyEncryptionKeyAlgorithm enum into the algorithm name string used by the
// key-encryption-key wrap/unwrap calls. Only RSA_OAEP ("RSA-OAEP") is supported; any other
// value raises IllegalArgumentException.
private static String getNameForKeyEncryptionKeyAlgorithm(KeyEncryptionKeyAlgorithm keyEncryptionKeyAlgorithm) { if(keyEncryptionKeyAlgorithm == KeyEncryptionKeyAlgorithm.RSA_OAEP) { return "RSA-OAEP"; } throw new IllegalArgumentException(String.format("Unexpected algorithm '%s'", keyEncryptionKeyAlgorithm)); }
// Bridges an azure-core KeyEncryptionKeyResolver into the EncryptionKeyStoreProvider SPI.
// wrapKey/unwrapKey resolve the key encryption key by id and delegate with the mapped
// algorithm name; sign returns an empty byte array and verify always throws, because the
// public Cosmos encryption interface does not expose those operations.
class EncryptionKeyStoreProviderImpl extends EncryptionKeyStoreProvider { private final KeyEncryptionKeyResolver keyEncryptionKeyResolver; private final String keyEncryptionKeyProviderName; public EncryptionKeyStoreProviderImpl(KeyEncryptionKeyResolver keyEncryptionKeyResolver, String keyEncryptionKeyProviderName) { this.keyEncryptionKeyResolver = keyEncryptionKeyResolver; this.keyEncryptionKeyProviderName = keyEncryptionKeyProviderName; } /** * Getter for provider name. * * @return provider name */ @Override public String getProviderName() { return this.keyEncryptionKeyProviderName; } /** * Unwraps the specified encryptedKey of a data encryption key. The encrypted value is expected to be encrypted * using the key encryption key with the specified encryptionKeyId and using the specified algorithm. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param algorithm * The encryption algorithm. * @param encryptedKey * The ciphertext key. * @return The unwrapped data encryption key. */ @Override public byte[] unwrapKey(String encryptionKeyId, KeyEncryptionKeyAlgorithm algorithm, byte[] encryptedKey) { return this.keyEncryptionKeyResolver.buildKeyEncryptionKey(encryptionKeyId).unwrapKey(getNameForKeyEncryptionKeyAlgorithm(algorithm), encryptedKey); } /** * Wraps a data encryption key using the key encryption key with the specified encryptionKeyId and using the * specified algorithm. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param algorithm * The encryption algorithm. * @param key * The plaintext key * @return The wrapped data encryption key. */ @Override public byte[] wrapKey(String encryptionKeyId, KeyEncryptionKeyAlgorithm algorithm, byte[] key) { return this.keyEncryptionKeyResolver.buildKeyEncryptionKey(encryptionKeyId).wrapKey(getNameForKeyEncryptionKeyAlgorithm(algorithm), key); } /** * The public facing Cosmos Encryption library interface does not expose this method, hence not supported. 
 * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param allowEnclaveComputations * Indicates whether the key encryption key supports enclave computations. * @return The signature of the key encryption key metadata. */ @Override public byte[] sign(String encryptionKeyId, boolean allowEnclaveComputations) { return new byte[0]; } /** * The public facing Cosmos Encryption library interface does not expose this method, hence not supported. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param allowEnclaveComputations * Indicates whether the key encryption key supports enclave computations. * @param signature * The signature of the key encryption key metadata. * @return true if matching, false if not. * @throws MicrosoftDataEncryptionException * on error */ @Override public boolean verify(String encryptionKeyId, boolean allowEnclaveComputations, byte[] signature) throws MicrosoftDataEncryptionException { throw new MicrosoftDataEncryptionException("The Verify operation is not supported. "); } }
// Bridges an azure-core KeyEncryptionKeyResolver into the EncryptionKeyStoreProvider SPI.
// wrapKey/unwrapKey resolve the key encryption key by id and delegate with the mapped
// algorithm name; sign returns an empty byte array and verify always throws, because the
// public Cosmos encryption interface does not expose those operations.
class EncryptionKeyStoreProviderImpl extends EncryptionKeyStoreProvider { private final KeyEncryptionKeyResolver keyEncryptionKeyResolver; private final String keyEncryptionKeyProviderName; public EncryptionKeyStoreProviderImpl(KeyEncryptionKeyResolver keyEncryptionKeyResolver, String keyEncryptionKeyProviderName) { this.keyEncryptionKeyResolver = keyEncryptionKeyResolver; this.keyEncryptionKeyProviderName = keyEncryptionKeyProviderName; } /** * Getter for provider name. * * @return provider name */ @Override public String getProviderName() { return this.keyEncryptionKeyProviderName; } /** * Unwraps the specified encryptedKey of a data encryption key. The encrypted value is expected to be encrypted * using the key encryption key with the specified encryptionKeyId and using the specified algorithm. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param algorithm * The encryption algorithm. * @param encryptedKey * The ciphertext key. * @return The unwrapped data encryption key. */ @Override public byte[] unwrapKey(String encryptionKeyId, KeyEncryptionKeyAlgorithm algorithm, byte[] encryptedKey) { return this.keyEncryptionKeyResolver.buildKeyEncryptionKey(encryptionKeyId).unwrapKey(getNameForKeyEncryptionKeyAlgorithm(algorithm), encryptedKey); } /** * Wraps a data encryption key using the key encryption key with the specified encryptionKeyId and using the * specified algorithm. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param algorithm * The encryption algorithm. * @param key * The plaintext key * @return The wrapped data encryption key. */ @Override public byte[] wrapKey(String encryptionKeyId, KeyEncryptionKeyAlgorithm algorithm, byte[] key) { return this.keyEncryptionKeyResolver.buildKeyEncryptionKey(encryptionKeyId).wrapKey(getNameForKeyEncryptionKeyAlgorithm(algorithm), key); } /** * The public facing Cosmos Encryption library interface does not expose this method, hence not supported. 
 * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param allowEnclaveComputations * Indicates whether the key encryption key supports enclave computations. * @return The signature of the key encryption key metadata. */ @Override public byte[] sign(String encryptionKeyId, boolean allowEnclaveComputations) { return new byte[0]; } /** * The public facing Cosmos Encryption library interface does not expose this method, hence not supported. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param allowEnclaveComputations * Indicates whether the key encryption key supports enclave computations. * @param signature * The signature of the key encryption key metadata. * @return true if matching, false if not. * @throws MicrosoftDataEncryptionException * on error */ @Override public boolean verify(String encryptionKeyId, boolean allowEnclaveComputations, byte[] signature) throws MicrosoftDataEncryptionException { throw new MicrosoftDataEncryptionException("The Verify operation is not supported. "); } }
would `Mono.just` work? ```suggestion return Mono.just(new AccessToken(encodedCredential, OffsetDateTime.MAX)); ```
/**
 * Gets a token representing the Base64-encoded basic-auth credential.
 * <p>
 * The credential is precomputed in the constructor and immutable, so there is no work to
 * defer: the token is emitted eagerly with {@code Mono.just} rather than wrapping a trivial
 * factory in {@code Mono.fromCallable}.
 *
 * @param request the details of the requested token (not used for basic authentication)
 * @return a Mono emitting the access token; expiry is pinned to {@code OffsetDateTime.MAX}
 *         because the static credential never expires
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    return Mono.just(new AccessToken(encodedCredential, OffsetDateTime.MAX));
}
// Fragment: defers AccessToken construction to subscription time; expiry is pinned to
// OffsetDateTime.MAX. NOTE(review): Mono.just would suffice here since encodedCredential
// is a precomputed constant.
return Mono.fromCallable(() -> new AccessToken(encodedCredential, OffsetDateTime.MAX));
/**
 * Gets a token representing the Base64-encoded basic-auth credential.
 * <p>
 * The credential is precomputed in the constructor and immutable, so there is no work to
 * defer: the token is emitted eagerly with {@code Mono.just} rather than wrapping a trivial
 * factory in {@code Mono.fromCallable}.
 *
 * @param request the details of the requested token (not used for basic authentication)
 * @return a Mono emitting the access token; expiry is pinned to {@code OffsetDateTime.MAX}
 *         because the static credential never expires
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    return Mono.just(new AccessToken(encodedCredential, OffsetDateTime.MAX));
}
// TokenCredential carrying a Base64-encoded "username:password" pair for HTTP basic auth.
// The constructor joins the credentials with ':' and Base64-encodes the UTF-8 bytes.
// NOTE(review): this span ends with a dangling @Override — the annotated getToken method
// lies outside it.
class BasicAuthenticationCredential implements TokenCredential { /** * Base64 encoded username-password credential. */ private final String encodedCredential; /** * Creates a basic authentication credential. * * @param username basic auth user name * @param password basic auth password */ public BasicAuthenticationCredential(String username, String password) { String credential = username + ":" + password; this.encodedCredential = Base64Util.encodeToString(credential.getBytes(StandardCharsets.UTF_8)); } /** * @throws RuntimeException If the UTF-8 encoding isn't supported. */ @Override }
// TokenCredential carrying a Base64-encoded "username:password" pair for HTTP basic auth.
// The constructor joins the credentials with ':' and Base64-encodes the UTF-8 bytes.
// NOTE(review): this span ends with a dangling @Override — the annotated getToken method
// lies outside it.
class BasicAuthenticationCredential implements TokenCredential { /** * Base64 encoded username-password credential. */ private final String encodedCredential; /** * Creates a basic authentication credential. * * @param username basic auth user name * @param password basic auth password */ public BasicAuthenticationCredential(String username, String password) { String credential = username + ":" + password; this.encodedCredential = Base64Util.encodeToString(credential.getBytes(StandardCharsets.UTF_8)); } /** * @throws RuntimeException If the UTF-8 encoding isn't supported. */ @Override }
The AAP library is only using an enum with this value, so for now we will go with this hard-coding. In my next PR I will expose the key algorithm on encryptionKeyWrapMetadata, and in the samples we will show https://github.com/Azure/azure-sdk-for-java/blob/3f31d68eed6fbe11516ca3afe3955c8840a6e974/sdk/keyvault/azure-security-keyvault-keys/src/main/java/com/azure/security/keyvault/keys/cryptography/models/KeyWrapAlgorithm.java#L20. We are also in conversation with the AAP devs about using a string instead of an enum, so we can then just pass the string through from AAP cryptography to the generic KeyEncryptionKeyResolver.
// Translates the KeyEncryptionKeyAlgorithm enum into the algorithm name string used by the
// key-encryption-key wrap/unwrap calls. Only RSA_OAEP ("RSA-OAEP") is supported; any other
// value raises IllegalArgumentException.
private static String getNameForKeyEncryptionKeyAlgorithm(KeyEncryptionKeyAlgorithm keyEncryptionKeyAlgorithm) { if(keyEncryptionKeyAlgorithm == KeyEncryptionKeyAlgorithm.RSA_OAEP) { return "RSA-OAEP"; } throw new IllegalArgumentException(String.format("Unexpected algorithm '%s'", keyEncryptionKeyAlgorithm)); }
// Fragment: the algorithm name returned for the RSA_OAEP key-encryption algorithm.
return "RSA-OAEP";
// Translates the KeyEncryptionKeyAlgorithm enum into the algorithm name string used by the
// key-encryption-key wrap/unwrap calls. Only RSA_OAEP ("RSA-OAEP") is supported; any other
// value raises IllegalArgumentException.
private static String getNameForKeyEncryptionKeyAlgorithm(KeyEncryptionKeyAlgorithm keyEncryptionKeyAlgorithm) { if(keyEncryptionKeyAlgorithm == KeyEncryptionKeyAlgorithm.RSA_OAEP) { return "RSA-OAEP"; } throw new IllegalArgumentException(String.format("Unexpected algorithm '%s'", keyEncryptionKeyAlgorithm)); }
// Bridges an azure-core KeyEncryptionKeyResolver into the EncryptionKeyStoreProvider SPI.
// wrapKey/unwrapKey resolve the key encryption key by id and delegate with the mapped
// algorithm name; sign returns an empty byte array and verify always throws, because the
// public Cosmos encryption interface does not expose those operations.
class EncryptionKeyStoreProviderImpl extends EncryptionKeyStoreProvider { private final KeyEncryptionKeyResolver keyEncryptionKeyResolver; private final String keyEncryptionKeyProviderName; public EncryptionKeyStoreProviderImpl(KeyEncryptionKeyResolver keyEncryptionKeyResolver, String keyEncryptionKeyProviderName) { this.keyEncryptionKeyResolver = keyEncryptionKeyResolver; this.keyEncryptionKeyProviderName = keyEncryptionKeyProviderName; } /** * Getter for provider name. * * @return provider name */ @Override public String getProviderName() { return this.keyEncryptionKeyProviderName; } /** * Unwraps the specified encryptedKey of a data encryption key. The encrypted value is expected to be encrypted * using the key encryption key with the specified encryptionKeyId and using the specified algorithm. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param algorithm * The encryption algorithm. * @param encryptedKey * The ciphertext key. * @return The unwrapped data encryption key. */ @Override public byte[] unwrapKey(String encryptionKeyId, KeyEncryptionKeyAlgorithm algorithm, byte[] encryptedKey) { return this.keyEncryptionKeyResolver.buildKeyEncryptionKey(encryptionKeyId).unwrapKey(getNameForKeyEncryptionKeyAlgorithm(algorithm), encryptedKey); } /** * Wraps a data encryption key using the key encryption key with the specified encryptionKeyId and using the * specified algorithm. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param algorithm * The encryption algorithm. * @param key * The plaintext key * @return The wrapped data encryption key. */ @Override public byte[] wrapKey(String encryptionKeyId, KeyEncryptionKeyAlgorithm algorithm, byte[] key) { return this.keyEncryptionKeyResolver.buildKeyEncryptionKey(encryptionKeyId).wrapKey(getNameForKeyEncryptionKeyAlgorithm(algorithm), key); } /** * The public facing Cosmos Encryption library interface does not expose this method, hence not supported. 
 * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param allowEnclaveComputations * Indicates whether the key encryption key supports enclave computations. * @return The signature of the key encryption key metadata. */ @Override public byte[] sign(String encryptionKeyId, boolean allowEnclaveComputations) { return new byte[0]; } /** * The public facing Cosmos Encryption library interface does not expose this method, hence not supported. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param allowEnclaveComputations * Indicates whether the key encryption key supports enclave computations. * @param signature * The signature of the key encryption key metadata. * @return true if matching, false if not. * @throws MicrosoftDataEncryptionException * on error */ @Override public boolean verify(String encryptionKeyId, boolean allowEnclaveComputations, byte[] signature) throws MicrosoftDataEncryptionException { throw new MicrosoftDataEncryptionException("The Verify operation is not supported. "); } }
// Bridges an azure-core KeyEncryptionKeyResolver into the EncryptionKeyStoreProvider SPI.
// wrapKey/unwrapKey resolve the key encryption key by id and delegate with the mapped
// algorithm name; sign returns an empty byte array and verify always throws, because the
// public Cosmos encryption interface does not expose those operations.
class EncryptionKeyStoreProviderImpl extends EncryptionKeyStoreProvider { private final KeyEncryptionKeyResolver keyEncryptionKeyResolver; private final String keyEncryptionKeyProviderName; public EncryptionKeyStoreProviderImpl(KeyEncryptionKeyResolver keyEncryptionKeyResolver, String keyEncryptionKeyProviderName) { this.keyEncryptionKeyResolver = keyEncryptionKeyResolver; this.keyEncryptionKeyProviderName = keyEncryptionKeyProviderName; } /** * Getter for provider name. * * @return provider name */ @Override public String getProviderName() { return this.keyEncryptionKeyProviderName; } /** * Unwraps the specified encryptedKey of a data encryption key. The encrypted value is expected to be encrypted * using the key encryption key with the specified encryptionKeyId and using the specified algorithm. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param algorithm * The encryption algorithm. * @param encryptedKey * The ciphertext key. * @return The unwrapped data encryption key. */ @Override public byte[] unwrapKey(String encryptionKeyId, KeyEncryptionKeyAlgorithm algorithm, byte[] encryptedKey) { return this.keyEncryptionKeyResolver.buildKeyEncryptionKey(encryptionKeyId).unwrapKey(getNameForKeyEncryptionKeyAlgorithm(algorithm), encryptedKey); } /** * Wraps a data encryption key using the key encryption key with the specified encryptionKeyId and using the * specified algorithm. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param algorithm * The encryption algorithm. * @param key * The plaintext key * @return The wrapped data encryption key. */ @Override public byte[] wrapKey(String encryptionKeyId, KeyEncryptionKeyAlgorithm algorithm, byte[] key) { return this.keyEncryptionKeyResolver.buildKeyEncryptionKey(encryptionKeyId).wrapKey(getNameForKeyEncryptionKeyAlgorithm(algorithm), key); } /** * The public facing Cosmos Encryption library interface does not expose this method, hence not supported. 
 * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param allowEnclaveComputations * Indicates whether the key encryption key supports enclave computations. * @return The signature of the key encryption key metadata. */ @Override public byte[] sign(String encryptionKeyId, boolean allowEnclaveComputations) { return new byte[0]; } /** * The public facing Cosmos Encryption library interface does not expose this method, hence not supported. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param allowEnclaveComputations * Indicates whether the key encryption key supports enclave computations. * @param signature * The signature of the key encryption key metadata. * @return true if matching, false if not. * @throws MicrosoftDataEncryptionException * on error */ @Override public boolean verify(String encryptionKeyId, boolean allowEnclaveComputations, byte[] signature) throws MicrosoftDataEncryptionException { throw new MicrosoftDataEncryptionException("The Verify operation is not supported. "); } }
Should we give the user bean a specified name, and then check that the single remaining bean has that name, to make sure it comes from the user configuration instead of ours?
/**
 * Verifies that the auto-configuration backs off when the user supplies their own
 * {@code EventHubClientBuilder}. The user bean is registered under an explicit name so the
 * assertions can prove the single remaining builder bean is the user-defined one — a bare
 * {@code hasSingleBean} check could be satisfied by an auto-configured bean as well.
 */
void userDefinedEventHubsClientBuilderProvidedShouldNotConfigureTheAuto() {
    this.contextRunner
        .withPropertyValues(
            "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"),
            "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub"
        )
        .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class)
        .withBean("user-defined-builder", EventHubClientBuilder.class, EventHubClientBuilder::new)
        .run(context -> {
            // Exactly one builder bean, and it is the one the user registered by name.
            assertThat(context).hasSingleBean(EventHubClientBuilder.class);
            assertThat(context).hasBean("user-defined-builder");
        });
}
// Fragment: asserts only that a single EventHubClientBuilder bean exists.
// NOTE(review): this alone cannot distinguish a user-defined bean from an auto-configured
// one — registering the user bean with an explicit name and asserting on that name is
// stronger.
.run(context -> assertThat(context).hasSingleBean(EventHubClientBuilder.class));
// Verifies the auto-configuration backs off when the user supplies their own
// EventHubClientBuilder: the user bean is registered under the explicit name
// "user-defined-builder", and the test asserts both that exactly one builder bean exists
// and that it is the one carrying the user-supplied name.
void userDefinedEventHubsClientBuilderProvidedShouldNotConfigureTheAuto() { this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean("user-defined-builder", EventHubClientBuilder.class, EventHubClientBuilder::new) .run(context -> { assertThat(context).hasSingleBean(EventHubClientBuilder.class); assertThat(context).hasBean("user-defined-builder"); }); }
class AzureEventHubsClientBuilderConfigurationTests { private final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureEventHubsClientBuilderConfiguration.class)); @Test void noConnectionInfoProvidedShouldNotConfigure() { contextRunner.run(context -> assertThat(context).doesNotHaveBean(AzureEventHubsClientBuilderConfiguration.class)); } @Test @SuppressWarnings("rawtypes") void connectionStringProvidedShouldConfigure() { contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .run(context -> { assertThat(context).hasSingleBean(AzureEventHubsClientBuilderConfiguration.class); assertThat(context).hasSingleBean(EventHubClientBuilderFactory.class); assertThat(context).hasSingleBean(EventHubClientBuilder.class); }); } @Test void customizerShouldBeCalled() { EventHubBuilderCustomizer customizer = new EventHubBuilderCustomizer(); this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean("customizer1", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer2", EventHubBuilderCustomizer.class, () -> customizer) .run(context -> assertThat(customizer.getCustomizedTimes()).isEqualTo(2)); } @Test void otherCustomizerShouldNotBeCalled() { EventHubBuilderCustomizer customizer = new EventHubBuilderCustomizer(); OtherBuilderCustomizer otherBuilderCustomizer = new OtherBuilderCustomizer(); this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), 
"spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean("customizer1", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer2", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer3", OtherBuilderCustomizer.class, () -> otherBuilderCustomizer) .run(context -> { assertThat(customizer.getCustomizedTimes()).isEqualTo(2); assertThat(otherBuilderCustomizer.getCustomizedTimes()).isEqualTo(0); }); } @Test private static class EventHubBuilderCustomizer extends TestBuilderCustomizer<EventHubClientBuilder> { } private static class OtherBuilderCustomizer extends TestBuilderCustomizer<ConfigurationClientBuilder> { } }
class AzureEventHubsClientBuilderConfigurationTests { private final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureEventHubsClientBuilderConfiguration.class)); @Test void noConnectionInfoProvidedShouldNotConfigure() { contextRunner.run(context -> assertThat(context).doesNotHaveBean(AzureEventHubsClientBuilderConfiguration.class)); } @Test @SuppressWarnings("rawtypes") void connectionStringProvidedShouldConfigure() { contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .run(context -> { assertThat(context).hasSingleBean(AzureEventHubsClientBuilderConfiguration.class); assertThat(context).hasSingleBean(EventHubClientBuilderFactory.class); assertThat(context).hasSingleBean(EventHubClientBuilder.class); }); } @Test void customizerShouldBeCalled() { EventHubBuilderCustomizer customizer = new EventHubBuilderCustomizer(); this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean("customizer1", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer2", EventHubBuilderCustomizer.class, () -> customizer) .run(context -> assertThat(customizer.getCustomizedTimes()).isEqualTo(2)); } @Test void otherCustomizerShouldNotBeCalled() { EventHubBuilderCustomizer customizer = new EventHubBuilderCustomizer(); OtherBuilderCustomizer otherBuilderCustomizer = new OtherBuilderCustomizer(); this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), 
"spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean("customizer1", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer2", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer3", OtherBuilderCustomizer.class, () -> otherBuilderCustomizer) .run(context -> { assertThat(customizer.getCustomizedTimes()).isEqualTo(2); assertThat(otherBuilderCustomizer.getCustomizedTimes()).isEqualTo(0); }); } @Test private static class EventHubBuilderCustomizer extends TestBuilderCustomizer<EventHubClientBuilder> { } private static class OtherBuilderCustomizer extends TestBuilderCustomizer<ConfigurationClientBuilder> { } }
yes, we can do that.
void userDefinedEventHubsClientBuilderProvidedShouldNotConfigureTheAuto() { this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean(EventHubClientBuilder.class, EventHubClientBuilder::new) .run(context -> assertThat(context).hasSingleBean(EventHubClientBuilder.class)); }
.run(context -> assertThat(context).hasSingleBean(EventHubClientBuilder.class));
void userDefinedEventHubsClientBuilderProvidedShouldNotConfigureTheAuto() { this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean("user-defined-builder", EventHubClientBuilder.class, EventHubClientBuilder::new) .run(context -> { assertThat(context).hasSingleBean(EventHubClientBuilder.class); assertThat(context).hasBean("user-defined-builder"); }); }
class AzureEventHubsClientBuilderConfigurationTests { private final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureEventHubsClientBuilderConfiguration.class)); @Test void noConnectionInfoProvidedShouldNotConfigure() { contextRunner.run(context -> assertThat(context).doesNotHaveBean(AzureEventHubsClientBuilderConfiguration.class)); } @Test @SuppressWarnings("rawtypes") void connectionStringProvidedShouldConfigure() { contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .run(context -> { assertThat(context).hasSingleBean(AzureEventHubsClientBuilderConfiguration.class); assertThat(context).hasSingleBean(EventHubClientBuilderFactory.class); assertThat(context).hasSingleBean(EventHubClientBuilder.class); }); } @Test void customizerShouldBeCalled() { EventHubBuilderCustomizer customizer = new EventHubBuilderCustomizer(); this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean("customizer1", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer2", EventHubBuilderCustomizer.class, () -> customizer) .run(context -> assertThat(customizer.getCustomizedTimes()).isEqualTo(2)); } @Test void otherCustomizerShouldNotBeCalled() { EventHubBuilderCustomizer customizer = new EventHubBuilderCustomizer(); OtherBuilderCustomizer otherBuilderCustomizer = new OtherBuilderCustomizer(); this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), 
"spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean("customizer1", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer2", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer3", OtherBuilderCustomizer.class, () -> otherBuilderCustomizer) .run(context -> { assertThat(customizer.getCustomizedTimes()).isEqualTo(2); assertThat(otherBuilderCustomizer.getCustomizedTimes()).isEqualTo(0); }); } @Test private static class EventHubBuilderCustomizer extends TestBuilderCustomizer<EventHubClientBuilder> { } private static class OtherBuilderCustomizer extends TestBuilderCustomizer<ConfigurationClientBuilder> { } }
class AzureEventHubsClientBuilderConfigurationTests { private final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureEventHubsClientBuilderConfiguration.class)); @Test void noConnectionInfoProvidedShouldNotConfigure() { contextRunner.run(context -> assertThat(context).doesNotHaveBean(AzureEventHubsClientBuilderConfiguration.class)); } @Test @SuppressWarnings("rawtypes") void connectionStringProvidedShouldConfigure() { contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .run(context -> { assertThat(context).hasSingleBean(AzureEventHubsClientBuilderConfiguration.class); assertThat(context).hasSingleBean(EventHubClientBuilderFactory.class); assertThat(context).hasSingleBean(EventHubClientBuilder.class); }); } @Test void customizerShouldBeCalled() { EventHubBuilderCustomizer customizer = new EventHubBuilderCustomizer(); this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean("customizer1", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer2", EventHubBuilderCustomizer.class, () -> customizer) .run(context -> assertThat(customizer.getCustomizedTimes()).isEqualTo(2)); } @Test void otherCustomizerShouldNotBeCalled() { EventHubBuilderCustomizer customizer = new EventHubBuilderCustomizer(); OtherBuilderCustomizer otherBuilderCustomizer = new OtherBuilderCustomizer(); this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), 
"spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean("customizer1", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer2", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer3", OtherBuilderCustomizer.class, () -> otherBuilderCustomizer) .run(context -> { assertThat(customizer.getCustomizedTimes()).isEqualTo(2); assertThat(otherBuilderCustomizer.getCustomizedTimes()).isEqualTo(0); }); } @Test private static class EventHubBuilderCustomizer extends TestBuilderCustomizer<EventHubClientBuilder> { } private static class OtherBuilderCustomizer extends TestBuilderCustomizer<ConfigurationClientBuilder> { } }
We created the key vault which makes us the owner. Why we reassign the role here?
public void canCreateVirtualMachineWithDiskEncryptionSet() { String clientId = this.clientIdFromFile(); String vaultName = generateRandomResourceName("kv", 8); Vault vault = azureResourceManager.vaults().define(vaultName) .withRegion(region) .withNewResourceGroup(rgName) .withRoleBasedAccessControl() .withPurgeProtectionEnabled() .create(); azureResourceManager.accessManagement().roleAssignments().define(UUID.randomUUID().toString()) .forServicePrincipal(clientId) .withBuiltInRole(BuiltInRole.KEY_VAULT_ADMINISTRATOR) .withResourceScope(vault) .create(); ResourceManagerUtils.sleep(Duration.ofMinutes(1)); Key key = vault.keys().define("key1") .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); DiskEncryptionSetInner diskEncryptionSet = azureResourceManager.disks().manager().serviceClient() .getDiskEncryptionSets().createOrUpdate(rgName, "des1", new DiskEncryptionSetInner() .withLocation(region.name()) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS) .withIdentity(new EncryptionSetIdentity().withType(DiskEncryptionSetIdentityType.SYSTEM_ASSIGNED)) .withActiveKey(new KeyForDiskEncryptionSet() .withSourceVault(new SourceVault().withId(vault.id())) .withKeyUrl(key.id()))); DiskEncryptionSetInner diskEncryptionSet2 = azureResourceManager.disks().manager().serviceClient() .getDiskEncryptionSets().createOrUpdate(rgName, "des2", new DiskEncryptionSetInner() .withLocation(region.name()) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withIdentity(new EncryptionSetIdentity().withType(DiskEncryptionSetIdentityType.SYSTEM_ASSIGNED)) .withActiveKey(new KeyForDiskEncryptionSet() .withSourceVault(new SourceVault().withId(vault.id())) .withKeyUrl(key.id()))); azureResourceManager.accessManagement().roleAssignments().define(UUID.randomUUID().toString()) .forObjectId(diskEncryptionSet.identity().principalId()) .withBuiltInRole(BuiltInRole.KEY_VAULT_CRYPTO_SERVICE_ENCRYPTION_USER) 
.withResourceScope(vault) .create(); azureResourceManager.accessManagement().roleAssignments().define(UUID.randomUUID().toString()) .forObjectId(diskEncryptionSet2.identity().principalId()) .withBuiltInRole(BuiltInRole.KEY_VAULT_CRYPTO_SERVICE_ENCRYPTION_USER) .withResourceScope(vault) .create(); ResourceManagerUtils.sleep(Duration.ofMinutes(1)); Disk disk1 = azureResourceManager.disks().define("disk1") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(32) .withDiskEncryptionSet(diskEncryptionSet.id()) .create(); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS, disk1.encryption().type()); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), disk1.encryption().diskEncryptionSetId().toLowerCase(Locale.ROOT)); VirtualMachine vm = azureResourceManager.virtualMachines().define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/27") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(16, 0, new VirtualMachineDiskOptions() .withDeleteOptions(DeleteOptions.DETACH) .withDiskEncryptionSet(null)) .withExistingDataDisk(disk1) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withDataDiskDefaultDiskEncryptionSet(diskEncryptionSet.id()) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withOSDiskDiskEncryptionSet(diskEncryptionSet.id()) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(diskEncryptionSet.id(), vm.osDiskDiskEncryptionSetId()); Assertions.assertNull(vm.dataDisks().get(0).diskEncryptionSetId()); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(1).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertEquals(DeleteOptions.DETACH, vm.dataDisks().get(0).deleteOptions()); 
Assertions.assertEquals(DeleteOptions.DELETE, vm.dataDisks().get(1).deleteOptions()); Disk disk2 = azureResourceManager.disks().define("disk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(32) .create(); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, disk2.encryption().type()); Assertions.assertNull(disk2.encryption().diskEncryptionSetId()); disk2.update() .withDiskEncryptionSet(diskEncryptionSet.id(), EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS) .apply(); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS, disk2.encryption().type()); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), disk2.encryption().diskEncryptionSetId().toLowerCase(Locale.ROOT)); vm.update() .withoutDataDisk(0) .withoutDataDisk(1) .withExistingDataDisk(disk2, 32, 2, new VirtualMachineDiskOptions() .withDeleteOptions(DeleteOptions.DELETE)) .withNewDataDisk(16, 3, CachingTypes.NONE) .withDataDiskDefaultDeleteOptions(DeleteOptions.DETACH) .apply(); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(2).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertNull(vm.dataDisks().get(3).diskEncryptionSetId()); Assertions.assertEquals(DeleteOptions.DELETE, vm.dataDisks().get(2).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.dataDisks().get(3).deleteOptions()); vm.deallocate(); Disk disk = azureResourceManager.disks().getById(vm.dataDisks().get(3).id()); disk.update() .withDiskEncryptionSet(diskEncryptionSet.id(), EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS) .apply(); vm.start(); vm.refresh(); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(3).diskEncryptionSetId().toLowerCase(Locale.ROOT)); vm.update() .withoutDataDisk(2) .withoutDataDisk(3) .withNewDataDisk(16, 0, new VirtualMachineDiskOptions() 
.withDeleteOptions(DeleteOptions.DELETE) .withDiskEncryptionSet(diskEncryptionSet.id())) .withNewDataDisk(32, 1, CachingTypes.NONE) .withDataDiskDefaultDiskEncryptionSet(diskEncryptionSet2.id()) .apply(); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(0).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertEquals(diskEncryptionSet2.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(1).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertEquals(DeleteOptions.DELETE, vm.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.dataDisks().get(1).deleteOptions()); disk = azureResourceManager.disks().getById(vm.dataDisks().get(0).id()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS, disk.encryption().type()); disk = azureResourceManager.disks().getById(vm.dataDisks().get(1).id()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, disk.encryption().type()); azureResourceManager.virtualMachines().deleteById(vm.id()); }
.withBuiltInRole(BuiltInRole.KEY_VAULT_ADMINISTRATOR)
public void canCreateVirtualMachineWithDiskEncryptionSet() { String clientId = this.clientIdFromFile(); String vaultName = generateRandomResourceName("kv", 8); Vault vault = azureResourceManager.vaults().define(vaultName) .withRegion(region) .withNewResourceGroup(rgName) .withRoleBasedAccessControl() .withPurgeProtectionEnabled() .create(); azureResourceManager.accessManagement().roleAssignments().define(UUID.randomUUID().toString()) .forServicePrincipal(clientId) .withBuiltInRole(BuiltInRole.KEY_VAULT_ADMINISTRATOR) .withResourceScope(vault) .create(); ResourceManagerUtils.sleep(Duration.ofMinutes(1)); Key key = vault.keys().define("key1") .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); DiskEncryptionSetInner diskEncryptionSet = azureResourceManager.disks().manager().serviceClient() .getDiskEncryptionSets().createOrUpdate(rgName, "des1", new DiskEncryptionSetInner() .withLocation(region.name()) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS) .withIdentity(new EncryptionSetIdentity().withType(DiskEncryptionSetIdentityType.SYSTEM_ASSIGNED)) .withActiveKey(new KeyForDiskEncryptionSet() .withSourceVault(new SourceVault().withId(vault.id())) .withKeyUrl(key.id()))); DiskEncryptionSetInner diskEncryptionSet2 = azureResourceManager.disks().manager().serviceClient() .getDiskEncryptionSets().createOrUpdate(rgName, "des2", new DiskEncryptionSetInner() .withLocation(region.name()) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withIdentity(new EncryptionSetIdentity().withType(DiskEncryptionSetIdentityType.SYSTEM_ASSIGNED)) .withActiveKey(new KeyForDiskEncryptionSet() .withSourceVault(new SourceVault().withId(vault.id())) .withKeyUrl(key.id()))); azureResourceManager.accessManagement().roleAssignments().define(UUID.randomUUID().toString()) .forObjectId(diskEncryptionSet.identity().principalId()) .withBuiltInRole(BuiltInRole.KEY_VAULT_CRYPTO_SERVICE_ENCRYPTION_USER) 
.withResourceScope(vault) .create(); azureResourceManager.accessManagement().roleAssignments().define(UUID.randomUUID().toString()) .forObjectId(diskEncryptionSet2.identity().principalId()) .withBuiltInRole(BuiltInRole.KEY_VAULT_CRYPTO_SERVICE_ENCRYPTION_USER) .withResourceScope(vault) .create(); ResourceManagerUtils.sleep(Duration.ofMinutes(1)); Disk disk1 = azureResourceManager.disks().define("disk1") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(32) .withDiskEncryptionSet(diskEncryptionSet.id()) .create(); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS, disk1.encryption().type()); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), disk1.encryption().diskEncryptionSetId().toLowerCase(Locale.ROOT)); VirtualMachine vm = azureResourceManager.virtualMachines().define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/27") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(16, 0, new VirtualMachineDiskOptions() .withDeleteOptions(DeleteOptions.DETACH) .withDiskEncryptionSet(null)) .withExistingDataDisk(disk1) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withDataDiskDefaultDiskEncryptionSet(diskEncryptionSet.id()) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withOSDiskDiskEncryptionSet(diskEncryptionSet.id()) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(diskEncryptionSet.id(), vm.osDiskDiskEncryptionSetId()); Assertions.assertNull(vm.dataDisks().get(0).diskEncryptionSetId()); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(1).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertEquals(DeleteOptions.DETACH, vm.dataDisks().get(0).deleteOptions()); 
Assertions.assertEquals(DeleteOptions.DELETE, vm.dataDisks().get(1).deleteOptions()); Disk disk2 = azureResourceManager.disks().define("disk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(32) .create(); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, disk2.encryption().type()); Assertions.assertNull(disk2.encryption().diskEncryptionSetId()); disk2.update() .withDiskEncryptionSet(diskEncryptionSet.id(), EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS) .apply(); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS, disk2.encryption().type()); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), disk2.encryption().diskEncryptionSetId().toLowerCase(Locale.ROOT)); vm.update() .withoutDataDisk(0) .withoutDataDisk(1) .withExistingDataDisk(disk2, 32, 2, new VirtualMachineDiskOptions() .withDeleteOptions(DeleteOptions.DELETE)) .withNewDataDisk(16, 3, CachingTypes.NONE) .withDataDiskDefaultDeleteOptions(DeleteOptions.DETACH) .apply(); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(2).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertNull(vm.dataDisks().get(3).diskEncryptionSetId()); Assertions.assertEquals(DeleteOptions.DELETE, vm.dataDisks().get(2).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.dataDisks().get(3).deleteOptions()); vm.deallocate(); Disk disk = azureResourceManager.disks().getById(vm.dataDisks().get(3).id()); disk.update() .withDiskEncryptionSet(diskEncryptionSet.id(), EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS) .apply(); vm.start(); vm.refresh(); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(3).diskEncryptionSetId().toLowerCase(Locale.ROOT)); vm.update() .withoutDataDisk(2) .withoutDataDisk(3) .withNewDataDisk(16, 0, new VirtualMachineDiskOptions() 
.withDeleteOptions(DeleteOptions.DELETE) .withDiskEncryptionSet(diskEncryptionSet.id())) .withNewDataDisk(32, 1, CachingTypes.NONE) .withDataDiskDefaultDiskEncryptionSet(diskEncryptionSet2.id()) .apply(); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(0).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertEquals(diskEncryptionSet2.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(1).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertEquals(DeleteOptions.DELETE, vm.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.dataDisks().get(1).deleteOptions()); disk = azureResourceManager.disks().getById(vm.dataDisks().get(0).id()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS, disk.encryption().type()); disk = azureResourceManager.disks().getById(vm.dataDisks().get(1).id()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, disk.encryption().type()); azureResourceManager.virtualMachines().deleteById(vm.id()); }
class VirtualMachineEncryptionTests extends ResourceManagerTestBase { private AzureResourceManager azureResourceManager; private String rgName = ""; private final String vmName = "javavm"; private final Region region = Region.US_EAST; @Override protected HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient) { return HttpPipelineProvider.buildHttpPipeline( credential, profile, null, httpLogOptions, null, new RetryPolicy("Retry-After", ChronoUnit.SECONDS), policies, httpClient); } @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { ResourceManagerUtils.InternalRuntimeContext.setDelayProvider(new TestDelayProvider(!isPlaybackMode())); ResourceManagerUtils.InternalRuntimeContext internalContext = new ResourceManagerUtils.InternalRuntimeContext(); internalContext.setIdentifierFunction(name -> new TestIdentifierProvider(testResourceNamer)); azureResourceManager = buildManager(AzureResourceManager.class, httpPipeline, profile); setInternalContext(internalContext, azureResourceManager); rgName = generateRandomResourceName("javacsmrg", 15); } @Override protected void cleanUpResources() { try { azureResourceManager.resourceGroups().beginDeleteByName(rgName); } catch (Exception e) { } } @Test @DoNotRecord(skipInPlayback = true) }
class VirtualMachineEncryptionTests extends ResourceManagerTestBase { private AzureResourceManager azureResourceManager; private String rgName = ""; private final String vmName = "javavm"; private final Region region = Region.US_EAST; @Override protected HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient) { return HttpPipelineProvider.buildHttpPipeline( credential, profile, null, httpLogOptions, null, new RetryPolicy("Retry-After", ChronoUnit.SECONDS), policies, httpClient); } @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { ResourceManagerUtils.InternalRuntimeContext.setDelayProvider(new TestDelayProvider(!isPlaybackMode())); ResourceManagerUtils.InternalRuntimeContext internalContext = new ResourceManagerUtils.InternalRuntimeContext(); internalContext.setIdentifierFunction(name -> new TestIdentifierProvider(testResourceNamer)); azureResourceManager = buildManager(AzureResourceManager.class, httpPipeline, profile); setInternalContext(internalContext, azureResourceManager); rgName = generateRandomResourceName("javacsmrg", 15); } @Override protected void cleanUpResources() { try { azureResourceManager.resourceGroups().beginDeleteByName(rgName); } catch (Exception e) { } } @Test @DoNotRecord(skipInPlayback = true) }
nit: Since this is a really simple `String.format` we could drop that and just use string concatenation and save on the performance hit of `String.format`
static List<FormSelectionMark> getReadResultFormSelectionMarks(ReadResult readResultItem, int pageNumber) { return readResultItem.getSelectionMarks().stream() .map(selectionMark -> { final FormSelectionMark formSelectionMark = new FormSelectionMark( null, toBoundingBox(selectionMark.getBoundingBox()), pageNumber); final com.azure.ai.formrecognizer.implementation.models.SelectionMarkState selectionMarkStateImpl = selectionMark.getState(); com.azure.ai.formrecognizer.models.SelectionMarkState selectionMarkState; if (SelectionMarkState.SELECTED.toString().equals(selectionMarkStateImpl.toString())) { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.SELECTED; } else if (SelectionMarkState.UNSELECTED.toString().equals(selectionMarkStateImpl.toString())) { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.UNSELECTED; } else { throw LOGGER.logThrowableAsError(new RuntimeException( String.format("%s, unsupported selection mark state.", selectionMarkStateImpl))); } FormSelectionMarkHelper.setConfidence(formSelectionMark, selectionMark.getConfidence()); FormSelectionMarkHelper.setState(formSelectionMark, selectionMarkState); return formSelectionMark; }) .collect(Collectors.toList()); }
String.format("%s, unsupported selection mark state.", selectionMarkStateImpl)));
static List<FormSelectionMark> getReadResultFormSelectionMarks(ReadResult readResultItem, int pageNumber) { return readResultItem.getSelectionMarks().stream() .map(selectionMark -> { final FormSelectionMark formSelectionMark = new FormSelectionMark( null, toBoundingBox(selectionMark.getBoundingBox()), pageNumber); final com.azure.ai.formrecognizer.implementation.models.SelectionMarkState selectionMarkStateImpl = selectionMark.getState(); com.azure.ai.formrecognizer.models.SelectionMarkState selectionMarkState; if (SelectionMarkState.SELECTED.toString().equals(selectionMarkStateImpl.toString())) { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.SELECTED; } else if (SelectionMarkState.UNSELECTED.toString().equals(selectionMarkStateImpl.toString())) { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.UNSELECTED; } else { throw LOGGER.logThrowableAsError(new RuntimeException( selectionMarkStateImpl + ", unsupported selection mark state.")); } FormSelectionMarkHelper.setConfidence(formSelectionMark, selectionMark.getConfidence()); FormSelectionMarkHelper.setState(formSelectionMark, selectionMarkState); return formSelectionMark; }) .collect(Collectors.toList()); }
/**
 * Internal helper that maps service (implementation) models returned by the Form Recognizer
 * REST API to the public SDK models ({@link RecognizedForm}, {@link FormPage}, ...).
 */
class Transforms {
    private static final ClientLogger LOGGER = new ClientLogger(Transforms.class);
    // Reference strings returned by the service, e.g. "/readResults/0/lines/1/words/2".
    private static final String WORD_REGEX = "/readResults/(\\d+)/lines/(\\d+)/words/(\\d+)";
    private static final String LINE_REGEX = "/readResults/(\\d+)/lines/(\\d+)";
    private static final String SELECTION_MARK_REGEX = "/readResults/(\\d+)/selectionMarks/(\\d+)";
    // Compile each pattern once; the originals were recompiled for every reference element
    // inside setReferenceElements, which is on the hot path of every field conversion.
    private static final Pattern WORD_PATTERN = Pattern.compile(WORD_REGEX);
    private static final Pattern LINE_PATTERN = Pattern.compile(LINE_REGEX);
    private static final Pattern SELECTION_MARK_PATTERN = Pattern.compile(SELECTION_MARK_REGEX);
    // DateTimeFormatter is thread-safe and immutable; build it once instead of per call.
    private static final DateTimeFormatter TIME_FORMATTER = DateTimeFormatter.ofPattern("HH:mm:ss");
    // Confidence used when the service omits a confidence value.
    private static final float DEFAULT_CONFIDENCE_VALUE = 1.0f;
    // Row/column span used when the service omits a span value.
    private static final int DEFAULT_TABLE_SPAN = 1;

    private Transforms() {
    }

    /**
     * Helper method to transform the service returned {@link AnalyzeResult} to SDK model
     * {@link RecognizedForm}.
     *
     * @param analyzeResult The service returned result for analyze custom forms.
     * @param includeFieldElements Boolean to indicate if to set reference elements data on fields.
     * @param modelId the unlabeled model Id used for recognition.
     * @return The List of {@code RecognizedForm}.
     */
    static List<RecognizedForm> toRecognizedForm(AnalyzeResult analyzeResult, boolean includeFieldElements,
        String modelId) {
        List<ReadResult> readResults = analyzeResult.getReadResults();
        List<DocumentResult> documentResults = analyzeResult.getDocumentResults();
        List<PageResult> pageResults = analyzeResult.getPageResults();
        List<RecognizedForm> extractedFormList;

        List<FormPage> formPages = toRecognizedLayout(analyzeResult, includeFieldElements);
        if (!CoreUtils.isNullOrEmpty(documentResults)) {
            // Labeled model: the service returns one document result per recognized form.
            extractedFormList = new ArrayList<>();
            for (DocumentResult documentResultItem : documentResults) {
                FormPageRange formPageRange;
                List<Integer> documentPageRange = documentResultItem.getPageRange();
                if (documentPageRange.size() == 2) {
                    formPageRange = new FormPageRange(documentPageRange.get(0), documentPageRange.get(1));
                } else {
                    formPageRange = new FormPageRange(1, 1);
                }
                Map<String, FormField> extractedFieldMap = getLabeledFieldMap(documentResultItem, readResults);
                final RecognizedForm recognizedForm = new RecognizedForm(
                    extractedFieldMap,
                    documentResultItem.getDocType(),
                    formPageRange,
                    // Page numbers are 1-based; subList end index is exclusive, so the last page
                    // number doubles as the exclusive bound.
                    formPages.subList(formPageRange.getFirstPageNumber() - 1, formPageRange.getLastPageNumber()));
                RecognizedFormHelper.setFormTypeConfidence(recognizedForm,
                    documentResultItem.getDocTypeConfidence());
                if (documentResultItem.getModelId() != null) {
                    RecognizedFormHelper.setModelId(recognizedForm, documentResultItem.getModelId().toString());
                }
                extractedFormList.add(recognizedForm);
            }
        } else {
            // Unlabeled model: one recognized form per page result, typed "form-<clusterId>".
            extractedFormList = new ArrayList<>();
            if (!CoreUtils.isNullOrEmpty(pageResults)) {
                forEachWithIndex(pageResults, ((index, pageResultItem) -> {
                    StringBuilder formType = new StringBuilder("form-");
                    int pageNumber = pageResultItem.getPage();
                    Integer clusterId = pageResultItem.getClusterId();
                    if (clusterId != null) {
                        formType.append(clusterId);
                    }
                    Map<String, FormField> extractedFieldMap = getUnlabeledFieldMap(includeFieldElements,
                        readResults, pageResultItem, pageNumber);
                    final RecognizedForm recognizedForm = new RecognizedForm(
                        extractedFieldMap,
                        formType.toString(),
                        new FormPageRange(pageNumber, pageNumber),
                        Collections.singletonList(formPages.get(index)));
                    RecognizedFormHelper.setModelId(recognizedForm, modelId);
                    extractedFormList.add(recognizedForm);
                }));
            }
        }
        return extractedFormList;
    }

    /**
     * Helper method to transform the service returned {@link AnalyzeResult} to SDK model
     * {@link FormPage}.
     *
     * @param analyzeResult The service returned result for analyze layouts.
     * @param includeFieldElements Boolean to indicate if to set reference elements data on fields.
     * @return The List of {@code FormPage}.
     */
    static List<FormPage> toRecognizedLayout(AnalyzeResult analyzeResult, boolean includeFieldElements) {
        List<ReadResult> readResults = analyzeResult.getReadResults();
        List<PageResult> pageResults = analyzeResult.getPageResults();
        List<FormPage> formPages = new ArrayList<>();
        boolean pageResultsIsNullOrEmpty = CoreUtils.isNullOrEmpty(pageResults);

        forEachWithIndex(readResults, ((index, readResultItem) -> {
            List<FormTable> perPageTableList = new ArrayList<>();
            if (!pageResultsIsNullOrEmpty) {
                PageResult pageResultItem = pageResults.get(index);
                if (pageResultItem != null) {
                    perPageTableList = getPageTables(pageResultItem, readResults, pageResultItem.getPage());
                }
            }

            List<FormLine> perPageFormLineList = new ArrayList<>();
            if (includeFieldElements && !CoreUtils.isNullOrEmpty(readResultItem.getLines())) {
                perPageFormLineList = getReadResultFormLines(readResultItem);
            }

            List<FormSelectionMark> perPageFormSelectionMarkList = new ArrayList<>();
            if (includeFieldElements && !CoreUtils.isNullOrEmpty(readResultItem.getSelectionMarks())) {
                // Take the page number from the read result itself. The previous code read it from
                // pageResults.get(index), which throws NPE when pageResults is null/empty (e.g.
                // prebuilt/layout responses without page results); the indices align, so the page
                // number is the same when both are present.
                perPageFormSelectionMarkList =
                    getReadResultFormSelectionMarks(readResultItem, readResultItem.getPage());
            }

            formPages.add(getFormPage(readResultItem, perPageTableList, perPageFormLineList,
                perPageFormSelectionMarkList));
        }));
        return formPages;
    }

    /**
     * Helper method to get per-page table information.
     *
     * @param pageResultItem The extracted page level information returned by the service.
     * @param readResults The text extraction result returned by the service.
     * @param pageNumber The 1 based page number on which these fields exist.
     * @return The list of per page {@code FormTable}.
     */
    static List<FormTable> getPageTables(PageResult pageResultItem, List<ReadResult> readResults, int pageNumber) {
        if (pageResultItem.getTables() == null) {
            return new ArrayList<>();
        }
        return pageResultItem.getTables().stream()
            .map(dataTable -> {
                FormTable formTable = new FormTable(dataTable.getRows(), dataTable.getColumns(),
                    dataTable.getCells()
                        .stream()
                        .map(dataTableCell -> new FormTableCell(
                            dataTableCell.getRowIndex(), dataTableCell.getColumnIndex(),
                            // Service omits span/header/footer when they have default values.
                            dataTableCell.getRowSpan() == null ? DEFAULT_TABLE_SPAN : dataTableCell.getRowSpan(),
                            dataTableCell.getColumnSpan() == null
                                ? DEFAULT_TABLE_SPAN : dataTableCell.getColumnSpan(),
                            dataTableCell.getText(),
                            toBoundingBox(dataTableCell.getBoundingBox()),
                            dataTableCell.getConfidence(),
                            dataTableCell.isHeader() != null && dataTableCell.isHeader(),
                            dataTableCell.isFooter() != null && dataTableCell.isFooter(),
                            pageNumber,
                            setReferenceElements(dataTableCell.getElements(), readResults)))
                        .collect(Collectors.toList()),
                    pageNumber);
                FormTableHelper.setBoundingBox(formTable, toBoundingBox(dataTable.getBoundingBox()));
                return formTable;
            })
            .collect(Collectors.toList());
    }

    /**
     * Helper method to convert the per page {@link ReadResult} item to {@link FormLine}.
     *
     * @param readResultItem The per page text extraction item result returned by the service.
     * @return The list of {@code FormLine}.
     */
    static List<FormLine> getReadResultFormLines(ReadResult readResultItem) {
        return readResultItem.getLines().stream()
            .map(textLine -> {
                FormLine formLine = new FormLine(
                    textLine.getText(),
                    toBoundingBox(textLine.getBoundingBox()),
                    readResultItem.getPage(),
                    toWords(textLine.getWords(), readResultItem.getPage()));
                FormLineHelper.setAppearance(formLine, getTextAppearance(textLine));
                return formLine;
            })
            .collect(Collectors.toList());
    }

    /**
     * Private method to get the appearance from the service side text line object.
     *
     * @param textLine The service side text line object.
     * @return the custom type TextAppearance model, or {@code null} when the service reported
     * no style for the line.
     */
    private static TextAppearance getTextAppearance(TextLine textLine) {
        TextAppearance textAppearance = new TextAppearance();
        if (textLine.getAppearance() == null || textLine.getAppearance().getStyle() == null) {
            return null;
        }
        if (textLine.getAppearance().getStyle().getName() != null) {
            TextAppearanceHelper.setStyleName(textAppearance,
                TextStyleName.fromString(textLine.getAppearance().getStyle().getName().toString()));
        }
        TextAppearanceHelper.setStyleConfidence(textAppearance,
            textLine.getAppearance().getStyle().getConfidence());
        return textAppearance;
    }

    /**
     * The field map returned on analyze with a labeled model id.
     *
     * @param documentResultItem The extracted document level information.
     * @param readResults The text extraction result returned by the service.
     * @return The {@link RecognizedForm} fields keyed by field name.
     */
    private static Map<String, FormField> getLabeledFieldMap(DocumentResult documentResultItem,
        List<ReadResult> readResults) {
        Map<String, FormField> recognizedFieldMap = new LinkedHashMap<>();
        if (!CoreUtils.isNullOrEmpty(documentResultItem.getFields())) {
            documentResultItem.getFields().forEach((key, fieldValue) -> {
                if (fieldValue != null) {
                    List<FormElement> formElementList = setReferenceElements(fieldValue.getElements(), readResults);
                    FieldData valueData;
                    // A field with neither page nor bounding box carries no positional data.
                    if (fieldValue.getPage() == null && CoreUtils.isNullOrEmpty(fieldValue.getBoundingBox())) {
                        valueData = null;
                    } else {
                        valueData = new FieldData(fieldValue.getText(), toBoundingBox(fieldValue.getBoundingBox()),
                            fieldValue.getPage(), formElementList);
                    }
                    recognizedFieldMap.put(key, setFormField(key, valueData, fieldValue, readResults));
                } else {
                    recognizedFieldMap.put(key, new FormField(key, null, null, null, DEFAULT_CONFIDENCE_VALUE));
                }
            });
        }
        return recognizedFieldMap;
    }

    /**
     * Helper method that converts the incoming service field value to one of the strongly typed
     * SDK level {@link FormField} with reference elements set when {@code includeFieldElements}
     * is set to true.
     *
     * @param name The name of the field.
     * @param valueData The value text of the field.
     * @param fieldValue The named field values returned by the service.
     * @param readResults The text extraction result returned by the service.
     * @return The strongly typed {@link FormField} for the field input.
     * @throws RuntimeException when the service returns an unsupported field value type.
     */
    private static FormField setFormField(String name, FieldData valueData, FieldValue fieldValue,
        List<ReadResult> readResults) {
        com.azure.ai.formrecognizer.models.FieldValue value;
        switch (fieldValue.getType()) {
            case PHONE_NUMBER:
                value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValuePhoneNumber(),
                    FieldValueType.PHONE_NUMBER);
                break;
            case STRING:
                value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueString(),
                    FieldValueType.STRING);
                break;
            case TIME:
                if (fieldValue.getValueTime() != null) {
                    // The service serializes times as "HH:mm:ss".
                    LocalTime fieldTime = LocalTime.parse(fieldValue.getValueTime(), TIME_FORMATTER);
                    value = new com.azure.ai.formrecognizer.models.FieldValue(fieldTime, FieldValueType.TIME);
                } else {
                    value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.TIME);
                }
                break;
            case DATE:
                value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueDate(),
                    FieldValueType.DATE);
                break;
            case INTEGER:
                // Service integers surface as SDK longs.
                if (fieldValue.getValueInteger() != null) {
                    value = new com.azure.ai.formrecognizer.models.FieldValue(
                        fieldValue.getValueInteger().longValue(), FieldValueType.LONG);
                } else {
                    value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.LONG);
                }
                break;
            case NUMBER:
                value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueNumber(),
                    FieldValueType.FLOAT);
                break;
            case ARRAY:
                if (fieldValue.getValueArray() != null) {
                    value = new com.azure.ai.formrecognizer.models.FieldValue(
                        toFieldValueArray(fieldValue.getValueArray(), readResults), FieldValueType.LIST);
                } else {
                    value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.LIST);
                }
                break;
            case OBJECT:
                if (fieldValue.getValueObject() != null) {
                    value = new com.azure.ai.formrecognizer.models.FieldValue(
                        toFieldValueObject(fieldValue.getValueObject(), readResults), FieldValueType.MAP);
                } else {
                    value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.MAP);
                }
                break;
            case SELECTION_MARK:
                if (fieldValue.getValueSelectionMark() != null) {
                    com.azure.ai.formrecognizer.models.SelectionMarkState selectionMarkState;
                    final FieldValueSelectionMark fieldValueSelectionMarkState = fieldValue.getValueSelectionMark();
                    if (FieldValueSelectionMark.SELECTED.equals(fieldValueSelectionMarkState)) {
                        selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.SELECTED;
                    } else if (FieldValueSelectionMark.UNSELECTED.equals(fieldValueSelectionMarkState)) {
                        selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.UNSELECTED;
                    } else {
                        // Unknown states are passed through as expandable enum values.
                        selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.fromString(
                            fieldValue.getValueSelectionMark().toString());
                    }
                    value = new com.azure.ai.formrecognizer.models.FieldValue(selectionMarkState,
                        FieldValueType.SELECTION_MARK_STATE);
                } else {
                    value = new com.azure.ai.formrecognizer.models.FieldValue(null,
                        FieldValueType.SELECTION_MARK_STATE);
                }
                break;
            case COUNTRY_REGION:
                value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueCountryRegion(),
                    FieldValueType.COUNTRY_REGION);
                break;
            default:
                throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported"));
        }
        return new FormField(name, null, valueData, value, setDefaultConfidenceValue(fieldValue.getConfidence()));
    }

    /**
     * Helper method to set default confidence value if confidence returned by service is null.
     *
     * @param confidence the confidence returned by service.
     * @return the field confidence value.
     */
    private static float setDefaultConfidenceValue(Float confidence) {
        return confidence == null ? DEFAULT_CONFIDENCE_VALUE : confidence;
    }

    /**
     * Helper method to convert the service returned
     * {@link com.azure.ai.formrecognizer.implementation.models.FieldValue#getValueObject()}
     * to a SDK level map of {@link FormField}.
     *
     * @param valueObject The map of field values returned by the service.
     * @param readResults The text extraction result returned by the service.
     * @return The Map of {@link FormField}.
     */
    private static Map<String, FormField> toFieldValueObject(Map<String, FieldValue> valueObject,
        List<ReadResult> readResults) {
        Map<String, FormField> fieldValueObjectMap = new TreeMap<>();
        valueObject.forEach((key, fieldValue) -> {
            FieldData valueData = null;
            if (fieldValue.getPage() != null && fieldValue.getBoundingBox() != null) {
                valueData = new FieldData(fieldValue.getText(), toBoundingBox(fieldValue.getBoundingBox()),
                    fieldValue.getPage(), setReferenceElements(fieldValue.getElements(), readResults));
            }
            fieldValueObjectMap.put(key, setFormField(key, valueData, fieldValue, readResults));
        });
        return fieldValueObjectMap;
    }

    /**
     * Helper method to convert the service returned
     * {@link com.azure.ai.formrecognizer.implementation.models.FieldValue#getValueArray()}
     * to a SDK level List of {@link FormField}.
     *
     * @param valueArray The array of field values returned by the service.
     * @param readResults The text extraction result returned by the service.
     * @return The List of {@link FormField}.
     */
    private static List<FormField> toFieldValueArray(List<FieldValue> valueArray, List<ReadResult> readResults) {
        return valueArray.stream()
            .map(fieldValue -> {
                FieldData valueData = null;
                // Nested arrays carry no positional data of their own.
                if (ARRAY != fieldValue.getType()
                    && (fieldValue.getPage() != null && fieldValue.getBoundingBox() != null)) {
                    valueData = new FieldData(fieldValue.getText(), toBoundingBox(fieldValue.getBoundingBox()),
                        fieldValue.getPage(), setReferenceElements(fieldValue.getElements(), readResults));
                }
                return setFormField(null, valueData, fieldValue, readResults);
            })
            .collect(Collectors.toList());
    }

    /**
     * Helper method to convert the page results to {@code FormPage form pages}.
     *
     * @param readResultItem The per page text extraction item result returned by the service.
     * @param perPageTableList The per page tables list.
     * @param perPageLineList The per page form lines.
     * @param perPageSelectionMarkList The per page selection marks.
     * @return The per page {@code FormPage}.
     */
    private static FormPage getFormPage(ReadResult readResultItem, List<FormTable> perPageTableList,
        List<FormLine> perPageLineList, List<FormSelectionMark> perPageSelectionMarkList) {
        FormPage formPage = new FormPage(
            readResultItem.getHeight(),
            readResultItem.getAngle(),
            LengthUnit.fromString(readResultItem.getUnit().toString()),
            readResultItem.getWidth(),
            perPageLineList,
            perPageTableList,
            readResultItem.getPage());
        FormPageHelper.setSelectionMarks(formPage, perPageSelectionMarkList);
        return formPage;
    }

    /**
     * Helper method to set the {@link RecognizedForm} fields from the key-value pairs returned
     * by the service for an unlabeled model.
     *
     * @param includeFieldElements Boolean to indicate if to set reference elements data on fields.
     * @param readResults The text extraction result returned by the service.
     * @param pageResultItem The extracted page level information returned by the service.
     * @param pageNumber The 1 based page number on which these fields exist.
     * @return The fields populated on {@link RecognizedForm} keyed by synthetic name "field-&lt;i&gt;".
     */
    private static Map<String, FormField> getUnlabeledFieldMap(boolean includeFieldElements,
        List<ReadResult> readResults, PageResult pageResultItem, int pageNumber) {
        Map<String, FormField> formFieldMap = new LinkedHashMap<>();
        List<KeyValuePair> keyValuePairs = pageResultItem.getKeyValuePairs();
        forEachWithIndex(keyValuePairs, ((index, keyValuePair) -> {
            List<FormElement> formKeyContentList = new ArrayList<>();
            List<FormElement> formValueContentList = new ArrayList<>();
            if (includeFieldElements) {
                formKeyContentList = setReferenceElements(keyValuePair.getKey().getElements(), readResults);
                formValueContentList = setReferenceElements(keyValuePair.getValue().getElements(), readResults);
            }
            FieldData labelData = new FieldData(keyValuePair.getKey().getText(),
                toBoundingBox(keyValuePair.getKey().getBoundingBox()), pageNumber, formKeyContentList);
            FieldData valueData = new FieldData(keyValuePair.getValue().getText(),
                toBoundingBox(keyValuePair.getValue().getBoundingBox()), pageNumber, formValueContentList);

            // Unlabeled results have no field names; synthesize stable ordinal names.
            String fieldName = "field-" + index;
            FormField formField = new FormField(fieldName, labelData, valueData,
                new com.azure.ai.formrecognizer.models.FieldValue(keyValuePair.getValue().getText(),
                    FieldValueType.STRING),
                setDefaultConfidenceValue(keyValuePair.getConfidence()));
            formFieldMap.put(fieldName, formField);
        }));
        return formFieldMap;
    }

    /**
     * Helper method to set the text reference elements on FieldValue/fields when
     * {@code includeFieldElements} set to true.
     *
     * @param elements The reference strings returned by the service.
     * @param readResults The text extraction result returned by the service.
     * @return The list of referenced elements.
     * @throws RuntimeException when a reference string matches none of the known formats.
     */
    private static List<FormElement> setReferenceElements(List<String> elements, List<ReadResult> readResults) {
        if (CoreUtils.isNullOrEmpty(elements)) {
            return new ArrayList<>();
        }
        List<FormElement> formElementList = new ArrayList<>();
        elements.forEach(elementString -> {
            // Use the precompiled patterns; recompiling here per element was wasteful.
            Matcher wordMatcher = WORD_PATTERN.matcher(elementString);
            Matcher lineMatcher = LINE_PATTERN.matcher(elementString);
            Matcher selectionMarkMatcher = SELECTION_MARK_PATTERN.matcher(elementString);
            if (wordMatcher.find() && wordMatcher.groupCount() == 3) {
                int pageIndex = Integer.parseInt(wordMatcher.group(1));
                int lineIndex = Integer.parseInt(wordMatcher.group(2));
                int wordIndex = Integer.parseInt(wordMatcher.group(3));
                TextWord textWord =
                    readResults.get(pageIndex).getLines().get(lineIndex).getWords().get(wordIndex);
                FormWord wordElement = new FormWord(textWord.getText(),
                    toBoundingBox(textWord.getBoundingBox()), pageIndex + 1,
                    setDefaultConfidenceValue(textWord.getConfidence()));
                formElementList.add(wordElement);
            } else if (lineMatcher.find() && lineMatcher.groupCount() == 2) {
                int pageIndex = Integer.parseInt(lineMatcher.group(1));
                int lineIndex = Integer.parseInt(lineMatcher.group(2));
                TextLine textLine = readResults.get(pageIndex).getLines().get(lineIndex);
                FormLine lineElement = new FormLine(textLine.getText(),
                    toBoundingBox(textLine.getBoundingBox()), pageIndex + 1,
                    toWords(textLine.getWords(), pageIndex + 1));
                FormLineHelper.setAppearance(lineElement, getTextAppearance(textLine));
                formElementList.add(lineElement);
            } else if (selectionMarkMatcher.find() && selectionMarkMatcher.groupCount() == 2) {
                int pageIndex = Integer.parseInt(selectionMarkMatcher.group(1));
                int selectionMarkIndex = Integer.parseInt(selectionMarkMatcher.group(2));
                SelectionMark selectionMark =
                    readResults.get(pageIndex).getSelectionMarks().get(selectionMarkIndex);
                FormSelectionMark selectionMarkElement = new FormSelectionMark(null,
                    toBoundingBox(selectionMark.getBoundingBox()), pageIndex + 1);
                FormSelectionMarkHelper.setState(selectionMarkElement,
                    SelectionMarkState.fromString(selectionMark.getState().toString()));
                formElementList.add(selectionMarkElement);
            } else {
                throw LOGGER.logExceptionAsError(new RuntimeException("Cannot find corresponding reference elements "
                    + "for the field value."));
            }
        });
        return formElementList;
    }

    /**
     * Helper method to convert the service level {@link TextWord} to list of SDK level model
     * {@link FormWord}.
     *
     * @param words A list of word reference elements returned by the service.
     * @param pageNumber The 1 based page number on which this word element exists.
     * @return The list of {@code FormWord words}.
     */
    private static List<FormWord> toWords(List<TextWord> words, int pageNumber) {
        return words.stream()
            .map(textWord -> new FormWord(
                textWord.getText(),
                toBoundingBox(textWord.getBoundingBox()),
                pageNumber,
                setDefaultConfidenceValue(textWord.getConfidence()))
            ).collect(Collectors.toList());
    }

    /**
     * Helper method to convert the service level modeled eight numbers representing the four
     * points to SDK level {@link FieldBoundingBox}.
     *
     * @param serviceBoundingBox A list of eight numbers representing the four points of a box.
     * @return A {@link FieldBoundingBox}, or {@code null} for an empty or odd-length list.
     */
    private static FieldBoundingBox toBoundingBox(List<Float> serviceBoundingBox) {
        if (CoreUtils.isNullOrEmpty(serviceBoundingBox) || (serviceBoundingBox.size() % 2) != 0) {
            return null;
        }
        List<Point> pointList = new ArrayList<>();
        // Consecutive (x, y) pairs form the box's points.
        for (int i = 0; i < serviceBoundingBox.size(); i += 2) {
            pointList.add(new Point(serviceBoundingBox.get(i), serviceBoundingBox.get(i + 1)));
        }
        return new FieldBoundingBox(pointList);
    }
}
class Transforms { private static final ClientLogger LOGGER = new ClientLogger(Transforms.class); private static final String WORD_REGEX = "/readResults/(\\d+)/lines/(\\d+)/words/(\\d+)"; private static final String LINE_REGEX = "/readResults/(\\d+)/lines/(\\d+)"; private static final String SELECTION_MARK_REGEX = "/readResults/(\\d+)/selectionMarks/(\\d+)"; private static final Pattern WORD_PATTERN = Pattern.compile(WORD_REGEX); private static final Pattern LINE_PATTERN = Pattern.compile(LINE_REGEX); private static final Pattern SELECTION_MARK_PATTERN = Pattern.compile(SELECTION_MARK_REGEX); private static final float DEFAULT_CONFIDENCE_VALUE = 1.0f; private static final int DEFAULT_TABLE_SPAN = 1; private Transforms() { } /** * Helper method to transform the service returned {@link AnalyzeResult} to SDK model {@link RecognizedForm}. * * @param analyzeResult The service returned result for analyze custom forms. * @param includeFieldElements Boolean to indicate if to set reference elements data on fields. * @param modelId the unlabeled model Id used for recognition. * @return The List of {@code RecognizedForm}. 
*/ static List<RecognizedForm> toRecognizedForm(AnalyzeResult analyzeResult, boolean includeFieldElements, String modelId) { List<ReadResult> readResults = analyzeResult.getReadResults(); List<DocumentResult> documentResults = analyzeResult.getDocumentResults(); List<PageResult> pageResults = analyzeResult.getPageResults(); List<RecognizedForm> extractedFormList; List<FormPage> formPages = toRecognizedLayout(analyzeResult, includeFieldElements); if (!CoreUtils.isNullOrEmpty(documentResults)) { extractedFormList = new ArrayList<>(); for (DocumentResult documentResultItem : documentResults) { FormPageRange formPageRange; List<Integer> documentPageRange = documentResultItem.getPageRange(); if (documentPageRange.size() == 2) { formPageRange = new FormPageRange(documentPageRange.get(0), documentPageRange.get(1)); } else { formPageRange = new FormPageRange(1, 1); } Map<String, FormField> extractedFieldMap = getLabeledFieldMap(documentResultItem, readResults); final RecognizedForm recognizedForm = new RecognizedForm( extractedFieldMap, documentResultItem.getDocType(), formPageRange, formPages.subList(formPageRange.getFirstPageNumber() - 1, formPageRange.getLastPageNumber())); RecognizedFormHelper.setFormTypeConfidence(recognizedForm, documentResultItem.getDocTypeConfidence()); if (documentResultItem.getModelId() != null) { RecognizedFormHelper.setModelId(recognizedForm, documentResultItem.getModelId().toString()); } extractedFormList.add(recognizedForm); } } else { extractedFormList = new ArrayList<>(); if (!CoreUtils.isNullOrEmpty(pageResults)) { forEachWithIndex(pageResults, ((index, pageResultItem) -> { StringBuilder formType = new StringBuilder("form-"); int pageNumber = pageResultItem.getPage(); Integer clusterId = pageResultItem.getClusterId(); if (clusterId != null) { formType.append(clusterId); } Map<String, FormField> extractedFieldMap = getUnlabeledFieldMap(includeFieldElements, readResults, pageResultItem, pageNumber); final RecognizedForm recognizedForm = new 
RecognizedForm( extractedFieldMap, formType.toString(), new FormPageRange(pageNumber, pageNumber), Collections.singletonList(formPages.get(index))); RecognizedFormHelper.setModelId(recognizedForm, modelId); extractedFormList.add(recognizedForm); })); } } return extractedFormList; } /** * Helper method to transform the service returned {@link AnalyzeResult} to SDK model {@link FormPage}. * * @param analyzeResult The service returned result for analyze layouts. * @param includeFieldElements Boolean to indicate if to set reference elements data on fields. * @return The List of {@code FormPage}. */ static List<FormPage> toRecognizedLayout(AnalyzeResult analyzeResult, boolean includeFieldElements) { List<ReadResult> readResults = analyzeResult.getReadResults(); List<PageResult> pageResults = analyzeResult.getPageResults(); List<FormPage> formPages = new ArrayList<>(); boolean pageResultsIsNullOrEmpty = CoreUtils.isNullOrEmpty(pageResults); forEachWithIndex(readResults, ((index, readResultItem) -> { List<FormTable> perPageTableList = new ArrayList<>(); if (!pageResultsIsNullOrEmpty) { PageResult pageResultItem = pageResults.get(index); if (pageResultItem != null) { perPageTableList = getPageTables(pageResultItem, readResults, pageResultItem.getPage()); } } List<FormLine> perPageFormLineList = new ArrayList<>(); if (includeFieldElements && !CoreUtils.isNullOrEmpty(readResultItem.getLines())) { perPageFormLineList = getReadResultFormLines(readResultItem); } List<FormSelectionMark> perPageFormSelectionMarkList = new ArrayList<>(); if (includeFieldElements && !CoreUtils.isNullOrEmpty(readResultItem.getSelectionMarks())) { PageResult pageResultItem = pageResults.get(index); perPageFormSelectionMarkList = getReadResultFormSelectionMarks(readResultItem, pageResultItem.getPage()); } formPages.add(getFormPage(readResultItem, perPageTableList, perPageFormLineList, perPageFormSelectionMarkList)); })); return formPages; } /** * Helper method to convert the per page {@link 
ReadResult} item to {@link FormSelectionMark}. * * @param readResultItem The per page text extraction item result returned by the service. * @param pageNumber The page number. * @return A list of {@code FormSelectionMark}. */ /** * Helper method to get per-page table information. * * @param pageResultItem The extracted page level information returned by the service. * @param readResults The text extraction result returned by the service. * @param pageNumber The 1 based page number on which these fields exist. * @return The list of per page {@code FormTable}. */ static List<FormTable> getPageTables(PageResult pageResultItem, List<ReadResult> readResults, int pageNumber) { if (pageResultItem.getTables() == null) { return new ArrayList<>(); } else { return pageResultItem.getTables().stream() .map(dataTable -> { FormTable formTable = new FormTable(dataTable.getRows(), dataTable.getColumns(), dataTable.getCells() .stream() .map(dataTableCell -> new FormTableCell( dataTableCell.getRowIndex(), dataTableCell.getColumnIndex(), dataTableCell.getRowSpan() == null ? DEFAULT_TABLE_SPAN : dataTableCell.getRowSpan(), dataTableCell.getColumnSpan() == null ? DEFAULT_TABLE_SPAN : dataTableCell.getColumnSpan(), dataTableCell.getText(), toBoundingBox(dataTableCell.getBoundingBox()), dataTableCell.getConfidence(), dataTableCell.isHeader() != null && dataTableCell.isHeader(), dataTableCell.isFooter() != null && dataTableCell.isFooter(), pageNumber, setReferenceElements(dataTableCell.getElements(), readResults))) .collect(Collectors.toList()), pageNumber); FormTableHelper.setBoundingBox(formTable, toBoundingBox(dataTable.getBoundingBox())); return formTable; }) .collect(Collectors.toList()); } } /** * Helper method to convert the per page {@link ReadResult} item to {@link FormLine}. * * @param readResultItem The per page text extraction item result returned by the service. * @return The list of {@code FormLine}. 
*/ static List<FormLine> getReadResultFormLines(ReadResult readResultItem) { return readResultItem.getLines().stream() .map(textLine -> { FormLine formLine = new FormLine( textLine.getText(), toBoundingBox(textLine.getBoundingBox()), readResultItem.getPage(), toWords(textLine.getWords(), readResultItem.getPage())); FormLineHelper.setAppearance(formLine, getTextAppearance(textLine)); return formLine; }) .collect(Collectors.toList()); } /** * Private method to get the appearance from the service side text line object. * * @param textLine The service side text line object. * @return the custom type TextAppearance model. */ private static TextAppearance getTextAppearance(TextLine textLine) { TextAppearance textAppearance = new TextAppearance(); if (textLine.getAppearance() != null && textLine.getAppearance().getStyle() != null) { if (textLine.getAppearance().getStyle().getName() != null) { TextAppearanceHelper.setStyleName(textAppearance, TextStyleName.fromString(textLine.getAppearance().getStyle().getName().toString())); } TextAppearanceHelper.setStyleConfidence(textAppearance, textLine.getAppearance().getStyle().getConfidence()); } else { return null; } return textAppearance; } /** * The field map returned on analyze with an unlabeled model id. * * @param documentResultItem The extracted document level information. * @param readResults The text extraction result returned by the service. 
* @return The {@link RecognizedForm */ private static Map<String, FormField> getLabeledFieldMap(DocumentResult documentResultItem, List<ReadResult> readResults) { Map<String, FormField> recognizedFieldMap = new LinkedHashMap<>(); if (!CoreUtils.isNullOrEmpty(documentResultItem.getFields())) { documentResultItem.getFields().forEach((key, fieldValue) -> { if (fieldValue != null) { List<FormElement> formElementList = setReferenceElements(fieldValue.getElements(), readResults); FieldData valueData; if (fieldValue.getPage() == null && CoreUtils.isNullOrEmpty(fieldValue.getBoundingBox())) { valueData = null; } else { valueData = new FieldData(fieldValue.getText(), toBoundingBox(fieldValue.getBoundingBox()), fieldValue.getPage(), formElementList); } recognizedFieldMap.put(key, setFormField(key, valueData, fieldValue, readResults)); } else { recognizedFieldMap.put(key, new FormField(key, null, null, null, DEFAULT_CONFIDENCE_VALUE)); } }); } return recognizedFieldMap; } /** * Helper method that converts the incoming service field value to one of the strongly typed SDK level * {@link FormField} with reference elements set when {@code includeFieldElements} is set to true. * * @param name The name of the field. * @param valueData The value text of the field. * @param fieldValue The named field values returned by the service. * @param readResults The text extraction result returned by the service. * @return The strongly typed {@link FormField} for the field input. 
*/ private static FormField setFormField(String name, FieldData valueData, FieldValue fieldValue, List<ReadResult> readResults) { com.azure.ai.formrecognizer.models.FieldValue value; switch (fieldValue.getType()) { case PHONE_NUMBER: value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValuePhoneNumber(), FieldValueType.PHONE_NUMBER); break; case STRING: value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueString(), FieldValueType.STRING); break; case TIME: if (fieldValue.getValueTime() != null) { LocalTime fieldTime = LocalTime.parse(fieldValue.getValueTime(), DateTimeFormatter.ofPattern("HH:mm:ss")); value = new com.azure.ai.formrecognizer.models.FieldValue(fieldTime, FieldValueType.TIME); } else { value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.TIME); } break; case DATE: value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueDate(), FieldValueType.DATE); break; case INTEGER: if (fieldValue.getValueInteger() != null) { value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueInteger().longValue(), FieldValueType.LONG); } else { value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.LONG); } break; case NUMBER: value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueNumber(), FieldValueType.FLOAT); break; case ARRAY: if (fieldValue.getValueArray() != null) { value = new com.azure.ai.formrecognizer.models.FieldValue( toFieldValueArray(fieldValue.getValueArray(), readResults), FieldValueType.LIST); } else { value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.LIST); } break; case OBJECT: if (fieldValue.getValueObject() != null) { value = new com.azure.ai.formrecognizer.models.FieldValue( toFieldValueObject(fieldValue.getValueObject(), readResults), FieldValueType.MAP); } else { value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.MAP); } break; case 
SELECTION_MARK: if (fieldValue.getValueSelectionMark() != null) { com.azure.ai.formrecognizer.models.SelectionMarkState selectionMarkState; final FieldValueSelectionMark fieldValueSelectionMarkState = fieldValue.getValueSelectionMark(); if (FieldValueSelectionMark.SELECTED.equals(fieldValueSelectionMarkState)) { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.SELECTED; } else if (FieldValueSelectionMark.UNSELECTED.equals(fieldValueSelectionMarkState)) { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.UNSELECTED; } else { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.fromString( fieldValue.getValueSelectionMark().toString()); } value = new com.azure.ai.formrecognizer.models.FieldValue(selectionMarkState, FieldValueType.SELECTION_MARK_STATE); } else { value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.SELECTION_MARK_STATE); } break; case COUNTRY_REGION: value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueCountryRegion(), FieldValueType.COUNTRY_REGION); break; default: throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported")); } return new FormField(name, null, valueData, value, setDefaultConfidenceValue(fieldValue.getConfidence())); } /** * Helper method to set default confidence value if confidence returned by service is null. * * @param confidence the confidence returned by service. * @return the field confidence value. */ private static float setDefaultConfidenceValue(Float confidence) { return confidence == null ? DEFAULT_CONFIDENCE_VALUE : confidence; } /** * Helper method to convert the service returned * {@link com.azure.ai.formrecognizer.implementation.models.FieldValue * to a SDK level map of {@link FormField}. * * @param valueObject The array of field values returned by the service in {@link FieldValue * @return The Map of {@link FormField}. 
*/ private static Map<String, FormField> toFieldValueObject(Map<String, FieldValue> valueObject, List<ReadResult> readResults) { Map<String, FormField> fieldValueObjectMap = new TreeMap<>(); valueObject.forEach((key, fieldValue) -> { FieldData valueData = null; if (fieldValue.getPage() != null && fieldValue.getBoundingBox() != null) { valueData = new FieldData(fieldValue.getText(), toBoundingBox(fieldValue.getBoundingBox()), fieldValue.getPage(), setReferenceElements(fieldValue.getElements(), readResults)); } fieldValueObjectMap.put(key, setFormField(key, valueData, fieldValue, readResults)); }); return fieldValueObjectMap; } /** * Helper method to convert the service returned * {@link com.azure.ai.formrecognizer.implementation.models.FieldValue * to a SDK level List of {@link FormField}. * * @param valueArray The array of field values returned by the service in {@link FieldValue * @param readResults The text extraction result returned by the service. * @return The List of {@link FormField}. */ private static List<FormField> toFieldValueArray(List<FieldValue> valueArray, List<ReadResult> readResults) { return valueArray.stream() .map(fieldValue -> { FieldData valueData = null; if (ARRAY != fieldValue.getType() && (fieldValue.getPage() != null && fieldValue.getBoundingBox() != null)) { valueData = new FieldData(fieldValue.getText(), toBoundingBox(fieldValue.getBoundingBox()), fieldValue.getPage(), setReferenceElements(fieldValue.getElements(), readResults)); } return setFormField(null, valueData, fieldValue, readResults); }) .collect(Collectors.toList()); } /** * Helper method to convert the page results to {@code FormPage form pages}. * * @param readResultItem The per page text extraction item result returned by the service. * @param perPageTableList The per page tables list. * @param perPageLineList The per page form lines. * @param perPageSelectionMarkList The per page selection marks. * @return The per page {@code FormPage}. 
*/ private static FormPage getFormPage(ReadResult readResultItem, List<FormTable> perPageTableList, List<FormLine> perPageLineList, List<FormSelectionMark> perPageSelectionMarkList) { FormPage formPage = new FormPage( readResultItem.getHeight(), readResultItem.getAngle(), LengthUnit.fromString(readResultItem.getUnit().toString()), readResultItem.getWidth(), perPageLineList, perPageTableList, readResultItem.getPage()); FormPageHelper.setSelectionMarks(formPage, perPageSelectionMarkList); return formPage; } /** * Helper method to set the {@link RecognizedForm * service. * * @param includeFieldElements Boolean to indicate if to set reference elements data on fields. * @param readResults The text extraction result returned by the service. * @param pageResultItem The extracted page level information returned by the service. * @param pageNumber The 1 based page number on which these fields exist. * @return The fields populated on {@link RecognizedForm */ private static Map<String, FormField> getUnlabeledFieldMap(boolean includeFieldElements, List<ReadResult> readResults, PageResult pageResultItem, int pageNumber) { Map<String, FormField> formFieldMap = new LinkedHashMap<>(); List<KeyValuePair> keyValuePairs = pageResultItem.getKeyValuePairs(); forEachWithIndex(keyValuePairs, ((index, keyValuePair) -> { List<FormElement> formKeyContentList = new ArrayList<>(); List<FormElement> formValueContentList = new ArrayList<>(); if (includeFieldElements) { formKeyContentList = setReferenceElements(keyValuePair.getKey().getElements(), readResults); formValueContentList = setReferenceElements(keyValuePair.getValue().getElements(), readResults ); } FieldData labelData = new FieldData(keyValuePair.getKey().getText(), toBoundingBox(keyValuePair.getKey().getBoundingBox()), pageNumber, formKeyContentList); FieldData valueData = new FieldData(keyValuePair.getValue().getText(), toBoundingBox(keyValuePair.getValue().getBoundingBox()), pageNumber, formValueContentList); String fieldName = 
"field-" + index; FormField formField = new FormField(fieldName, labelData, valueData, new com.azure.ai.formrecognizer.models.FieldValue(keyValuePair.getValue().getText(), FieldValueType.STRING), setDefaultConfidenceValue(keyValuePair.getConfidence()) ); formFieldMap.put(fieldName, formField); })); return formFieldMap; } /** * Helper method to set the text reference elements on FieldValue/fields when {@code includeFieldElements} set to * true. * * @return The list if referenced elements. */ private static List<FormElement> setReferenceElements(List<String> elements, List<ReadResult> readResults) { if (CoreUtils.isNullOrEmpty(elements)) { return new ArrayList<>(); } List<FormElement> formElementList = new ArrayList<>(); elements.forEach(elementString -> { Matcher wordMatcher = WORD_PATTERN.matcher(elementString); if (wordMatcher.find() && wordMatcher.groupCount() == 3) { int pageIndex = Integer.parseInt(wordMatcher.group(1)); int lineIndex = Integer.parseInt(wordMatcher.group(2)); int wordIndex = Integer.parseInt(wordMatcher.group(3)); TextWord textWord = readResults.get(pageIndex).getLines().get(lineIndex).getWords().get(wordIndex); FormWord wordElement = new FormWord(textWord.getText(), toBoundingBox(textWord.getBoundingBox()), pageIndex + 1, setDefaultConfidenceValue(textWord.getConfidence())); formElementList.add(wordElement); } Matcher lineMatcher = LINE_PATTERN.matcher(elementString); if (lineMatcher.find() && lineMatcher.groupCount() == 2) { int pageIndex = Integer.parseInt(lineMatcher.group(1)); int lineIndex = Integer.parseInt(lineMatcher.group(2)); TextLine textLine = readResults.get(pageIndex).getLines().get(lineIndex); FormLine lineElement = new FormLine(textLine.getText(), toBoundingBox(textLine.getBoundingBox()), pageIndex + 1, toWords(textLine.getWords(), pageIndex + 1)); FormLineHelper.setAppearance(lineElement, getTextAppearance(textLine)); formElementList.add(lineElement); } Matcher selectionMarkMatcher = 
SELECTION_MARK_PATTERN.matcher(elementString); if (selectionMarkMatcher.find() && selectionMarkMatcher.groupCount() == 2) { int pageIndex = Integer.parseInt(selectionMarkMatcher.group(1)); int selectionMarkIndex = Integer.parseInt(selectionMarkMatcher.group(2)); SelectionMark selectionMark = readResults.get(pageIndex).getSelectionMarks().get(selectionMarkIndex); FormSelectionMark selectionMarkElement = new FormSelectionMark(null, toBoundingBox(selectionMark.getBoundingBox()), pageIndex + 1); FormSelectionMarkHelper.setState(selectionMarkElement, SelectionMarkState.fromString(selectionMark.getState().toString())); formElementList.add(selectionMarkElement); } }); return formElementList; } /** * Helper method to convert the service level {@link TextWord} to list of SDK level model {@link FormWord}. * * @param words A list of word reference elements returned by the service. * @param pageNumber The 1 based page number on which this word element exists. * @return The list of {@code FormWord words}. */ private static List<FormWord> toWords(List<TextWord> words, int pageNumber) { return words.stream() .map(textWord -> new FormWord( textWord.getText(), toBoundingBox(textWord.getBoundingBox()), pageNumber, setDefaultConfidenceValue(textWord.getConfidence())) ).collect(Collectors.toList()); } /** * Helper method to convert the service level modeled eight numbers representing the four points to SDK level * {@link FieldBoundingBox}. * * @param serviceBoundingBox A list of eight numbers representing the four points of a box. * @return A {@link FieldBoundingBox}. */ private static FieldBoundingBox toBoundingBox(List<Float> serviceBoundingBox) { if (CoreUtils.isNullOrEmpty(serviceBoundingBox) || (serviceBoundingBox.size() % 2) != 0) { return null; } List<Point> pointList = new ArrayList<>(); for (int i = 0; i < serviceBoundingBox.size(); i++) { pointList.add(new Point(serviceBoundingBox.get(i), serviceBoundingBox.get(++i))); } return new FieldBoundingBox(pointList); } }
should we enforce static usage instead (after everything is converted)?
public void visitToken(DetailAST ast) { switch (ast.getType()) { case TokenTypes.IMPORT: final String importClassPath = FullIdent.createFullIdentBelow(ast).getText(); hasClientLoggerImported = hasClientLoggerImported || importClassPath.equals(CLIENT_LOGGER_PATH); INVALID_LOGS.forEach(item -> { if (importClassPath.startsWith(item)) { log(ast, String.format(NOT_CLIENT_LOGGER_ERROR, "external logger", CLIENT_LOGGER_PATH, item)); } }); break; case TokenTypes.CLASS_DEF: case TokenTypes.INTERFACE_DEF: classNameDeque.offer(ast.findFirstToken(TokenTypes.IDENT).getText()); break; case TokenTypes.LITERAL_NEW: checkLoggerInstantiation(ast); break; case TokenTypes.VARIABLE_DEF: checkLoggerNameMatch(ast); break; case TokenTypes.METHOD_CALL: final DetailAST dotToken = ast.findFirstToken(TokenTypes.DOT); if (dotToken == null) { return; } final String methodCallName = FullIdent.createFullIdentBelow(dotToken).getText(); if (methodCallName.startsWith("System.out") || methodCallName.startsWith("System.err")) { log(ast, String.format(NOT_CLIENT_LOGGER_ERROR, "Java System", CLIENT_LOGGER_PATH, methodCallName)); } break; default: break; } }
break;
public void visitToken(DetailAST ast) { switch (ast.getType()) { case TokenTypes.IMPORT: final String importClassPath = FullIdent.createFullIdentBelow(ast).getText(); hasClientLoggerImported = hasClientLoggerImported || importClassPath.equals(CLIENT_LOGGER_PATH); INVALID_LOGS.forEach(item -> { if (importClassPath.startsWith(item)) { log(ast, String.format(NOT_CLIENT_LOGGER_ERROR, "external logger", CLIENT_LOGGER_PATH, item)); } }); break; case TokenTypes.CLASS_DEF: case TokenTypes.INTERFACE_DEF: classNameDeque.offer(ast.findFirstToken(TokenTypes.IDENT).getText()); break; case TokenTypes.LITERAL_NEW: checkLoggerInstantiation(ast); break; case TokenTypes.VARIABLE_DEF: checkLoggerNameMatch(ast); break; case TokenTypes.METHOD_CALL: final DetailAST dotToken = ast.findFirstToken(TokenTypes.DOT); if (dotToken == null) { return; } final String methodCallName = FullIdent.createFullIdentBelow(dotToken).getText(); if (methodCallName.startsWith("System.out") || methodCallName.startsWith("System.err")) { log(ast, String.format(NOT_CLIENT_LOGGER_ERROR, "Java System", CLIENT_LOGGER_PATH, methodCallName)); } break; default: break; } }
class name AST node private final Queue<String> classNameDeque = Collections.asLifoQueue(new ArrayDeque<>()); private static final Set<String> INVALID_LOGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "org.slf4j", "org.apache.logging.log4j", "java.util.logging" ))); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class name AST node private final Queue<String> classNameDeque = Collections.asLifoQueue(new ArrayDeque<>()); private static final Set<String> INVALID_LOGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "org.slf4j", "org.apache.logging.log4j", "java.util.logging" ))); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
Yes, I think it would be good to enforce the inverse of what this was previously checking after a while.
public void visitToken(DetailAST ast) { switch (ast.getType()) { case TokenTypes.IMPORT: final String importClassPath = FullIdent.createFullIdentBelow(ast).getText(); hasClientLoggerImported = hasClientLoggerImported || importClassPath.equals(CLIENT_LOGGER_PATH); INVALID_LOGS.forEach(item -> { if (importClassPath.startsWith(item)) { log(ast, String.format(NOT_CLIENT_LOGGER_ERROR, "external logger", CLIENT_LOGGER_PATH, item)); } }); break; case TokenTypes.CLASS_DEF: case TokenTypes.INTERFACE_DEF: classNameDeque.offer(ast.findFirstToken(TokenTypes.IDENT).getText()); break; case TokenTypes.LITERAL_NEW: checkLoggerInstantiation(ast); break; case TokenTypes.VARIABLE_DEF: checkLoggerNameMatch(ast); break; case TokenTypes.METHOD_CALL: final DetailAST dotToken = ast.findFirstToken(TokenTypes.DOT); if (dotToken == null) { return; } final String methodCallName = FullIdent.createFullIdentBelow(dotToken).getText(); if (methodCallName.startsWith("System.out") || methodCallName.startsWith("System.err")) { log(ast, String.format(NOT_CLIENT_LOGGER_ERROR, "Java System", CLIENT_LOGGER_PATH, methodCallName)); } break; default: break; } }
break;
public void visitToken(DetailAST ast) { switch (ast.getType()) { case TokenTypes.IMPORT: final String importClassPath = FullIdent.createFullIdentBelow(ast).getText(); hasClientLoggerImported = hasClientLoggerImported || importClassPath.equals(CLIENT_LOGGER_PATH); INVALID_LOGS.forEach(item -> { if (importClassPath.startsWith(item)) { log(ast, String.format(NOT_CLIENT_LOGGER_ERROR, "external logger", CLIENT_LOGGER_PATH, item)); } }); break; case TokenTypes.CLASS_DEF: case TokenTypes.INTERFACE_DEF: classNameDeque.offer(ast.findFirstToken(TokenTypes.IDENT).getText()); break; case TokenTypes.LITERAL_NEW: checkLoggerInstantiation(ast); break; case TokenTypes.VARIABLE_DEF: checkLoggerNameMatch(ast); break; case TokenTypes.METHOD_CALL: final DetailAST dotToken = ast.findFirstToken(TokenTypes.DOT); if (dotToken == null) { return; } final String methodCallName = FullIdent.createFullIdentBelow(dotToken).getText(); if (methodCallName.startsWith("System.out") || methodCallName.startsWith("System.err")) { log(ast, String.format(NOT_CLIENT_LOGGER_ERROR, "Java System", CLIENT_LOGGER_PATH, methodCallName)); } break; default: break; } }
class name AST node private final Queue<String> classNameDeque = Collections.asLifoQueue(new ArrayDeque<>()); private static final Set<String> INVALID_LOGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "org.slf4j", "org.apache.logging.log4j", "java.util.logging" ))); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class name AST node private final Queue<String> classNameDeque = Collections.asLifoQueue(new ArrayDeque<>()); private static final Set<String> INVALID_LOGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "org.slf4j", "org.apache.logging.log4j", "java.util.logging" ))); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
Yes, `Mono.just` would work but `Mono.fromCallable` further defers the creation of `AccessToken`, so if the reactive stream is never subscribed the `AccessToken` is never instantiated.
public Mono<AccessToken> getToken(TokenRequestContext request) { return Mono.fromCallable(() -> new AccessToken(encodedCredential, OffsetDateTime.MAX)); }
return Mono.fromCallable(() -> new AccessToken(encodedCredential, OffsetDateTime.MAX));
public Mono<AccessToken> getToken(TokenRequestContext request) { return Mono.fromCallable(() -> new AccessToken(encodedCredential, OffsetDateTime.MAX)); }
class BasicAuthenticationCredential implements TokenCredential { /** * Base64 encoded username-password credential. */ private final String encodedCredential; /** * Creates a basic authentication credential. * * @param username basic auth user name * @param password basic auth password */ public BasicAuthenticationCredential(String username, String password) { String credential = username + ":" + password; this.encodedCredential = Base64Util.encodeToString(credential.getBytes(StandardCharsets.UTF_8)); } /** * @throws RuntimeException If the UTF-8 encoding isn't supported. */ @Override }
class BasicAuthenticationCredential implements TokenCredential { /** * Base64 encoded username-password credential. */ private final String encodedCredential; /** * Creates a basic authentication credential. * * @param username basic auth user name * @param password basic auth password */ public BasicAuthenticationCredential(String username, String password) { String credential = username + ":" + password; this.encodedCredential = Base64Util.encodeToString(credential.getBytes(StandardCharsets.UTF_8)); } /** * @throws RuntimeException If the UTF-8 encoding isn't supported. */ @Override }
wellKnown
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well know certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded keyVault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
LOGGER.log(FINE, String.format("Loaded well know certificates: %s.", wellKnowCertificates.getAliases()));
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well know certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded keyVault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. 
* </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return 
getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) { LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] 
chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. 
* </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return 
getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) { LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] 
chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
Thank you for your review. But `well known` can also be used. Refs: https://dictionary.cambridge.org/dictionary/english/well-known
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well know certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded keyVault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
LOGGER.log(FINE, String.format("Loaded well know certificates: %s.", wellKnowCertificates.getAliases()));
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well know certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded keyVault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. 
* </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return 
getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) { LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] 
chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. 
* </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return 
getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) { LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] 
chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
Shouldn't this be well known? Well know is not correct, right?
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well know certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded keyVault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
LOGGER.log(FINE, String.format("Loaded well know certificates: %s.", wellKnowCertificates.getAliases()));
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well know certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded keyVault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. 
* </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return 
getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) { LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] 
chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. 
* </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return 
getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) { LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] 
chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
New PR created: https://github.com/Azure/azure-sdk-for-java/pull/27426
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well know certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded keyVault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
LOGGER.log(FINE, String.format("Loaded well know certificates: %s.", wellKnowCertificates.getAliases()));
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well know certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded keyVault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. 
* </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return 
getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) { LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] 
chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
/**
 * A {@link KeyStoreSpi} implementation that aggregates certificates from the JRE key store,
 * two configurable file-system paths ("well known" and "custom"), Azure Key Vault, and the
 * classpath, and exposes them through a single read-mostly key store.
 *
 * <p>Write operations that would mutate external stores ({@code engineStore},
 * {@code engineSetKeyEntry}) are intentionally no-ops; only classpath certificate entries can
 * be added via {@code engineSetCertificateEntry}.</p>
 */
class KeyVaultKeyStore extends KeyStoreSpi {

    /**
     * Stores the key-store name.
     */
    public static final String KEY_STORE_TYPE = "AzureKeyVault";

    /**
     * Stores the algorithm name.
     */
    public static final String ALGORITHM_NAME = KEY_STORE_TYPE;

    /**
     * Stores the logger.
     */
    private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName());

    /**
     * Stores the JRE key store certificates.
     */
    private final JreCertificates jreCertificates;

    /**
     * Stores well-known certificates loaded from a specific path.
     */
    private final SpecificPathCertificates wellKnowCertificates;

    /**
     * Stores custom certificates loaded from a specific path.
     */
    private final SpecificPathCertificates customCertificates;

    /**
     * Stores certificates loaded from Key Vault.
     */
    private final KeyVaultCertificates keyVaultCertificates;

    /**
     * Stores certificates loaded from the classpath.
     */
    private final ClasspathCertificates classpathCertificates;

    /**
     * Stores all the certificate sources; {@code engineDeleteEntry} and the lookup methods
     * iterate these in order.
     */
    private final List<AzureCertificates> allCertificates;

    /**
     * Stores the creation date reported for every alias (see {@code engineGetCreationDate}).
     */
    private final Date creationDate;

    // When true, a failed certificate/alias lookup triggers a Key Vault refresh before
    // giving up (used when an untrusted certificate is encountered).
    private final boolean refreshCertificatesWhenHaveUnTrustCertificate;

    /**
     * Store the path where the well known certificates are placed.
     */
    final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known"))
        .orElse("/etc/certs/well-known/");

    /**
     * Store the path where the custom certificates are placed.
     */
    final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom"))
        .orElse("/etc/certs/custom/");

    // NOTE(review): the constructor documented in the original source (it reads
    // azure.keyvault.uri / tenantId / clientId / clientSecret / managedIdentity system
    // properties to initialize the Key Vault client) is not present in this chunk; the
    // final fields above are assigned there.

    /**
     * Resolves the certificate refresh interval from system properties, preferring
     * {@code azure.keyvault.jca.certificates-refresh-interval-in-ms} over the legacy
     * {@code azure.keyvault.jca.certificates-refresh-interval}.
     *
     * @return the refresh interval in milliseconds; {@code 0} when neither property is set.
     */
    Long getRefreshInterval() {
        return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms",
                "azure.keyvault.jca.certificates-refresh-interval")
            .map(System::getProperty)
            .filter(Objects::nonNull)
            .map(Long::valueOf)
            .findFirst()
            .orElse(0L);
    }

    /**
     * get key vault key store by system property
     *
     * @return KeyVault key store
     * @throws CertificateException if any of the certificates in the
     *     keystore could not be loaded
     * @throws NoSuchAlgorithmException when algorithm is unavailable.
     * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type
     * @throws IOException when an I/O error occurs.
     */
    public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException,
        NoSuchAlgorithmException, KeyStoreException, IOException {
        KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME);
        KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter(
            System.getProperty("azure.keyvault.uri"),
            System.getProperty("azure.keyvault.tenant-id"),
            System.getProperty("azure.keyvault.client-id"),
            System.getProperty("azure.keyvault.client-secret"),
            System.getProperty("azure.keyvault.managed-identity"));
        keyStore.load(parameter);
        return keyStore;
    }

    @Override
    public Enumeration<String> engineAliases() {
        return Collections.enumeration(getAllAliases());
    }

    @Override
    public boolean engineContainsAlias(String alias) {
        return engineIsCertificateEntry(alias);
    }

    @Override
    public void engineDeleteEntry(String alias) {
        // Propagate the delete to every certificate source.
        allCertificates.forEach(a -> a.deleteEntry(alias));
    }

    @Override
    public boolean engineEntryInstanceOf(String alias, Class<? extends KeyStore.Entry> entryClass) {
        return super.engineEntryInstanceOf(alias, entryClass);
    }

    /**
     * Looks up a certificate by alias across all sources; when not found and
     * {@code refreshCertificatesWhenHaveUnTrustCertificate} is enabled, refreshes the
     * Key Vault certificates once and retries against that source.
     *
     * @param alias the alias to look up.
     * @return the certificate, or {@code null} when no source contains the alias.
     */
    @Override
    public Certificate engineGetCertificate(String alias) {
        Certificate certificate = allCertificates.stream()
            .map(AzureCertificates::getCertificates)
            .filter(a -> a.containsKey(alias))
            .findFirst()
            .map(certificates -> certificates.get(alias))
            .orElse(null);
        if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) {
            keyVaultCertificates.refreshCertificates();
            certificate = keyVaultCertificates.getCertificates().get(alias);
        }
        return certificate;
    }

    /**
     * Finds the first alias whose certificate equals the given one, refreshing the
     * Key Vault certificates as a fallback when enabled.
     *
     * @param cert the certificate to search for; {@code null} yields {@code null}.
     * @return the matching alias, or {@code null} when none matches.
     */
    @Override
    public String engineGetCertificateAlias(Certificate cert) {
        String alias = null;
        if (cert != null) {
            List<String> aliasList = getAllAliases();
            for (String candidateAlias : aliasList) {
                Certificate certificate = engineGetCertificate(candidateAlias);
                // Compare on 'cert' (known non-null here): engineGetCertificate can return
                // null for a stale alias, and certificate.equals(cert) would then NPE.
                if (cert.equals(certificate)) {
                    alias = candidateAlias;
                    break;
                }
            }
        }
        if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) {
            alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert);
        }
        return alias;
    }

    @Override
    public Certificate[] engineGetCertificateChain(String alias) {
        // Only single-certificate chains are supported: the chain is the certificate itself.
        Certificate[] chain = null;
        Certificate certificate = engineGetCertificate(alias);
        if (certificate != null) {
            chain = new Certificate[1];
            chain[0] = certificate;
        }
        return chain;
    }

    @Override
    public Date engineGetCreationDate(String alias) {
        // The alias is ignored: every entry reports the key store's creation date.
        // A defensive copy is returned because java.util.Date is mutable.
        return new Date(creationDate.getTime());
    }

    @Override
    public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam)
        throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException {
        return super.engineGetEntry(alias, protParam);
    }

    @Override
    public Key engineGetKey(String alias, char[] password) {
        // The password is ignored; keys come from the certificate sources, not a
        // password-protected store.
        return allCertificates.stream()
            .map(AzureCertificates::getCertificateKeys)
            .filter(a -> a.containsKey(alias))
            .findFirst()
            .map(certificateKeys -> certificateKeys.get(alias))
            .orElse(null);
    }

    @Override
    public boolean engineIsCertificateEntry(String alias) {
        return getAllAliases().contains(alias);
    }

    @Override
    public boolean engineIsKeyEntry(String alias) {
        // Key entries and certificate entries are treated identically here.
        return engineIsCertificateEntry(alias);
    }

    @Override
    public void engineLoad(KeyStore.LoadStoreParameter param) {
        if (param instanceof KeyVaultLoadStoreParameter) {
            KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param;
            keyVaultCertificates.updateKeyVaultClient(parameter.getUri(),
                parameter.getTenantId(),
                parameter.getClientId(),
                parameter.getClientSecret(),
                parameter.getManagedIdentity());
        }
        classpathCertificates.loadCertificatesFromClasspath();
    }

    @Override
    public void engineLoad(InputStream stream, char[] password) {
        // Stream and password are ignored; only classpath certificates are (re)loaded.
        classpathCertificates.loadCertificatesFromClasspath();
    }

    /**
     * Merges the aliases of every source, starting with the JRE aliases. Duplicates are
     * dropped (first source wins) and logged at FINE.
     *
     * @return all distinct aliases across the sources.
     */
    private List<String> getAllAliases() {
        List<String> allAliases = new ArrayList<>(jreCertificates.getAliases());
        Map<String, List<String>> aliasLists = new HashMap<>();
        aliasLists.put("well known certificates", wellKnowCertificates.getAliases());
        aliasLists.put("custom certificates", customCertificates.getAliases());
        aliasLists.put("key vault certificates", keyVaultCertificates.getAliases());
        aliasLists.put("class path certificates", classpathCertificates.getAliases());
        aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> {
            if (allAliases.contains(alias)) {
                LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType));
            } else {
                allAliases.add(alias);
            }
        }));
        return allAliases;
    }

    @Override
    public void engineSetCertificateEntry(String alias, Certificate certificate) {
        // Refuse to shadow an alias that already exists in any source.
        if (getAllAliases().contains(alias)) {
            LOGGER.log(WARNING, "Cannot overwrite own certificate");
            return;
        }
        classpathCertificates.setCertificateEntry(alias, certificate);
    }

    @Override
    public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam)
        throws KeyStoreException {
        super.engineSetEntry(alias, entry, protParam);
    }

    @Override
    public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] chain) {
        // Intentionally a no-op: key entries cannot be added to this key store.
    }

    @Override
    public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) {
        // Intentionally a no-op: key entries cannot be added to this key store.
    }

    @Override
    public int engineSize() {
        return getAllAliases().size();
    }

    @Override
    public void engineStore(OutputStream stream, char[] password) {
        // Intentionally a no-op: this aggregated key store is not persisted.
    }

    @Override
    public void engineStore(KeyStore.LoadStoreParameter param) {
        // Intentionally a no-op: this aggregated key store is not persisted.
    }
}
Can we use `map().collect(Collectors.toList())` instead of building the list via `forEach` side effects?
/**
 * Sends a message batch to the Azure Service Bus entity this sender is connected to.
 *
 * @param batch of messages which allows client to send maximum allowed size for a batch of messages.
 * @param transactionContext to be set on batch message before sending to Service Bus; may be {@code null}
 *     when the send is not transactional.
 *
 * @return A {@link Mono} that finishes this operation on the Service Bus resource.
 */
private Mono<Void> sendInternal(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessages")));
    }
    if (Objects.isNull(batch)) {
        return monoError(logger, new NullPointerException("'batch' cannot be null."));
    }
    final boolean isTracingEnabled = tracerProvider.isEnabled();
    final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null;
    if (batch.getMessages().isEmpty()) {
        logger.info("Cannot send an EventBatch that is empty.");
        return Mono.empty();
    }
    logger.atInfo()
        .addKeyValue("batchSize", batch.getCount())
        .log("Sending batch.");
    AtomicReference<Context> sharedContext = new AtomicReference<>(Context.NONE);
    // Serialize the batch up front. A sequential map/collect replaces the previous
    // forEach into a Collections.synchronizedList: the traversal is single-threaded,
    // so the synchronization was unnecessary and the collector is the idiomatic form.
    // Tracing span links are recorded as a side effect while mapping, as before.
    final List<org.apache.qpid.proton.message.Message> messages = batch.getMessages().stream()
        .map(serviceBusMessage -> {
            if (isTracingEnabled) {
                parentContext.set(serviceBusMessage.getContext());
                if (sharedContext.get().equals(Context.NONE)) {
                    sharedContext.set(tracerProvider.getSharedSpanBuilder(SERVICE_BASE_NAME, parentContext.get()));
                }
                tracerProvider.addSpanLinks(sharedContext.get().addData(SPAN_CONTEXT_KEY,
                    serviceBusMessage.getContext()));
            }
            final org.apache.qpid.proton.message.Message message = messageSerializer.serialize(serviceBusMessage);
            // Ensure message annotations are always present on the AMQP message.
            final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
                ? new MessageAnnotations(new HashMap<>())
                : message.getMessageAnnotations();
            message.setMessageAnnotations(messageAnnotations);
            return message;
        })
        .collect(java.util.stream.Collectors.toList());
    if (isTracingEnabled) {
        // Start one shared SEND span covering the whole batch.
        final Context finalSharedContext = sharedContext.get().equals(Context.NONE)
            ? Context.NONE
            : sharedContext.get()
                .addData(ENTITY_PATH_KEY, entityName)
                .addData(HOST_NAME_KEY, connectionProcessor.getFullyQualifiedNamespace())
                .addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);
        parentContext.set(tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, finalSharedContext, ProcessKind.SEND));
    }
    final Mono<Void> sendMessage = getSendLink().flatMap(link -> {
        if (transactionContext != null && transactionContext.getTransactionId() != null) {
            // Transactional send: attach the transaction id as the delivery state.
            final TransactionalState deliveryState = new TransactionalState();
            deliveryState.setTxnId(new Binary(transactionContext.getTransactionId().array()));
            return messages.size() == 1
                ? link.send(messages.get(0), deliveryState)
                : link.send(messages, deliveryState);
        } else {
            return messages.size() == 1
                ? link.send(messages.get(0))
                : link.send(messages);
        }
    });
    return withRetry(sendMessage, retryOptions,
        String.format("entityPath[%s], partitionId[%s]: Sending messages timed out.", entityName, batch.getCount()))
        .doOnEach(signal -> {
            if (isTracingEnabled) {
                tracerProvider.endSpan(parentContext.get(), signal);
            }
        }).onErrorMap(this::mapError);
}
batch.getMessages().forEach(serviceBusMessage -> {
/**
 * Sends a message batch to the Azure Service Bus entity this sender is connected to.
 *
 * @param batch of messages which allows client to send maximum allowed size for a batch of messages.
 * @param transactionContext to be set on batch message before sending to Service Bus; may be {@code null}
 *     when the send is not transactional.
 *
 * @return A {@link Mono} that finishes this operation on the Service Bus resource.
 */
private Mono<Void> sendInternal(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessages")));
    }
    if (Objects.isNull(batch)) {
        return monoError(logger, new NullPointerException("'batch' cannot be null."));
    }
    final boolean isTracingEnabled = tracerProvider.isEnabled();
    final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null;
    // An empty batch is a no-op, not an error.
    if (batch.getMessages().isEmpty()) {
        logger.info("Cannot send an EventBatch that is empty.");
        return Mono.empty();
    }
    logger.atInfo()
        .addKeyValue("batchSize", batch.getCount())
        .log("Sending batch.");
    AtomicReference<Context> sharedContext = new AtomicReference<>(Context.NONE);
    // NOTE(review): the traversal below is sequential, so the synchronized list appears
    // unnecessary; a stream map/collect would be the idiomatic replacement — confirm no
    // concurrent access before changing.
    final List<org.apache.qpid.proton.message.Message> messages = Collections.synchronizedList(new ArrayList<>());
    batch.getMessages().forEach(serviceBusMessage -> {
        if (isTracingEnabled) {
            // Record a span link per message and lazily create the shared span builder.
            parentContext.set(serviceBusMessage.getContext());
            if (sharedContext.get().equals(Context.NONE)) {
                sharedContext.set(tracerProvider.getSharedSpanBuilder(SERVICE_BASE_NAME, parentContext.get()));
            }
            tracerProvider.addSpanLinks(sharedContext.get().addData(SPAN_CONTEXT_KEY, serviceBusMessage.getContext()));
        }
        final org.apache.qpid.proton.message.Message message = messageSerializer.serialize(serviceBusMessage);
        // Ensure message annotations are always present on the AMQP message.
        final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
            ? new MessageAnnotations(new HashMap<>())
            : message.getMessageAnnotations();
        message.setMessageAnnotations(messageAnnotations);
        messages.add(message);
    });
    if (isTracingEnabled) {
        // Start one shared SEND span covering the whole batch.
        final Context finalSharedContext = sharedContext.get().equals(Context.NONE)
            ? Context.NONE
            : sharedContext.get()
                .addData(ENTITY_PATH_KEY, entityName)
                .addData(HOST_NAME_KEY, connectionProcessor.getFullyQualifiedNamespace())
                .addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);
        parentContext.set(tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, finalSharedContext, ProcessKind.SEND));
    }
    final Mono<Void> sendMessage = getSendLink().flatMap(link -> {
        if (transactionContext != null && transactionContext.getTransactionId() != null) {
            // Transactional send: attach the transaction id as the delivery state.
            final TransactionalState deliveryState = new TransactionalState();
            deliveryState.setTxnId(new Binary(transactionContext.getTransactionId().array()));
            return messages.size() == 1
                ? link.send(messages.get(0), deliveryState)
                : link.send(messages, deliveryState);
        } else {
            return messages.size() == 1
                ? link.send(messages.get(0))
                : link.send(messages);
        }
    });
    return withRetry(sendMessage, retryOptions,
        String.format("entityPath[%s], partitionId[%s]: Sending messages timed out.", entityName, batch.getCount()))
        .doOnEach(signal -> {
            if (isTracingEnabled) {
                tracerProvider.endSpan(parentContext.get(), signal);
            }
        }).onErrorMap(this::mapError);
}
class ServiceBusSenderAsyncClient implements AutoCloseable { /** * The default maximum allowable size, in bytes, for a batch to be sent. */ static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final String TRANSACTION_LINK_NAME = "coordinator"; private static final String AZ_TRACING_NAMESPACE_VALUE = "Microsoft.ServiceBus"; private static final CreateMessageBatchOptions DEFAULT_BATCH_OPTIONS = new CreateMessageBatchOptions(); private static final String SERVICE_BASE_NAME = "ServiceBus."; private final ClientLogger logger = new ClientLogger(ServiceBusSenderAsyncClient.class); private final AtomicReference<String> linkName = new AtomicReference<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final MessagingEntityType entityType; private final Runnable onClientClose; private final String entityName; private final ServiceBusConnectionProcessor connectionProcessor; private final String viaEntityName; /** * Creates a new instance of this {@link ServiceBusSenderAsyncClient} that sends messages to a Service Bus entity. 
*/ ServiceBusSenderAsyncClient(String entityName, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose, String viaEntityName) { this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.entityName = Objects.requireNonNull(entityName, "'entityPath' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = tracerProvider; this.retryPolicy = getRetryPolicy(retryOptions); this.entityType = entityType; this.viaEntityName = viaEntityName; this.onClientClose = onClientClose; } /** * Gets the fully qualified namespace. * * @return The fully qualified namespace. */ public String getFullyQualifiedNamespace() { return connectionProcessor.getFullyQualifiedNamespace(); } /** * Gets the name of the Service Bus resource. * * @return The name of the Service Bus resource. */ public String getEntityPath() { return entityName; } /** * Sends a message to a Service Bus queue or topic. * * @param message Message to be sent to Service Bus queue or topic. * * @return The {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code message} is {@code null}. * @throws IllegalStateException if sender is already disposed. * @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message or * the message could not be sent. */ public Mono<Void> sendMessage(ServiceBusMessage message) { if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } return sendInternal(Flux.just(message), null); } /** * Sends a message to a Service Bus queue or topic. 
* * @param message Message to be sent to Service Bus queue or topic. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return The {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code message}, {@code transactionContext} or * {@code transactionContext.transactionId} is {@code null}. * @throws IllegalStateException if sender is already disposed. * @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message or * the message could not be sent. */ public Mono<Void> sendMessage(ServiceBusMessage message, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendInternal(Flux.just(message), transactionContext); } /** * Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages * exceed the maximum size of a single batch, an exception will be triggered and the send will fail. * By default, the message size is the max amount allowed on the link. * * @param messages Messages to be sent to Service Bus queue or topic. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource. * * @throws NullPointerException if {@code batch}, {@code transactionContext} or * {@code transactionContext.transactionId} is {@code null}. * @throws IllegalStateException if sender is already disposed. * @throws ServiceBusException if {@code messages} are larger than the maximum allowed size of a single message or * the message could not be sent. 
*/ public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendIterable(messages, transactionContext); } /** * Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages exceed * the maximum size of a single batch, an exception will be triggered and the send will fail. By default, the * message size is the max amount allowed on the link. * * @param messages Messages to be sent to Service Bus queue or topic. * * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource. * * @throws NullPointerException if {@code messages} is {@code null}. * @throws ServiceBusException if {@code messages} are larger than the maximum allowed size of a single message or * the message could not be sent. * @throws IllegalStateException if sender is already disposed. */ public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages) { return sendIterable(messages, null); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * * @return A {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code batch} is {@code null}. * @throws ServiceBusException if the message batch could not be sent. * @throws IllegalStateException if sender is already disposed. */ public Mono<Void> sendMessages(ServiceBusMessageBatch batch) { return sendInternal(batch, null); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. 
* * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code batch}, {@code transactionContext} or * {@code transactionContext.transactionId} is {@code null}. * @throws ServiceBusException if the message batch could not be sent. * @throws IllegalStateException if sender is already disposed. */ public Mono<Void> sendMessages(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendInternal(batch, transactionContext); } /** * Creates a {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows. * * @return A {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows. * @throws ServiceBusException if the message batch could not be created. * @throws IllegalStateException if sender is already disposed. */ public Mono<ServiceBusMessageBatch> createMessageBatch() { return createMessageBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link ServiceBusMessageBatch} configured with the options specified. * * @param options A set of options used to configure the {@link ServiceBusMessageBatch}. * * @return A new {@link ServiceBusMessageBatch} configured with the given options. * @throws NullPointerException if {@code options} is null. * @throws ServiceBusException if the message batch could not be created. * @throws IllegalStateException if sender is already disposed. 
* @throws IllegalArgumentException if {@link CreateMessageBatchOptions * maximum allowed size. */ public Mono<ServiceBusMessageBatch> createMessageBatch(CreateMessageBatchOptions options) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "createMessageBatch"))); } if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final int maxSize = options.getMaximumSizeInBytes(); return getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (maxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateMessageBatchOptions.getMaximumSizeInBytes (%s bytes) is larger than the link size" + " (%s bytes).", maxSize, maximumLinkSize))); } final int batchSize = maxSize > 0 ? maxSize : maximumLinkSize; return Mono.just( new ServiceBusMessageBatch(batchSize, link::getErrorContext, tracerProvider, messageSerializer, entityName, getFullyQualifiedNamespace())); })).onErrorMap(this::mapError); } /** * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is * enqueued and made available to receivers only at the scheduled enqueue time. * * @param message Message to be sent to the Service Bus Queue. * @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic. * @param transactionContext to be set on message before sending to Service Bus. * * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message. * * @throws NullPointerException if {@code message}, {@code scheduledEnqueueTime}, {@code transactionContext} or * {@code transactionContext.transactionID} is {@code null}. * @throws ServiceBusException If the message could not be scheduled. 
* @throws IllegalStateException if sender is already disposed. */ public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return scheduleMessageInternal(message, scheduledEnqueueTime, transactionContext); } /** * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is * enqueued and made available to receivers only at the scheduled enqueue time. * * @param message Message to be sent to the Service Bus Queue. * @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic. * * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message. * * @throws NullPointerException if {@code message} or {@code scheduledEnqueueTime} is {@code null}. * @throws ServiceBusException If the message could not be scheduled. */ public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime) { return scheduleMessageInternal(message, scheduledEnqueueTime, null); } /** * Sends a batch of scheduled messages to the Azure Service Bus entity this sender is connected to. A scheduled * message is enqueued and made available to receivers only at the scheduled enqueue time. * * @param messages Messages to be sent to the Service Bus queue or topic. * @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic. * * @return Sequence numbers of the scheduled messages which can be used to cancel the messages. 
* * @throws NullPointerException If {@code messages} or {@code scheduledEnqueueTime} is {@code null}. * @throws ServiceBusException If the messages could not be scheduled. * @throws IllegalStateException if sender is already disposed. */ public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime) { return scheduleMessages(messages, scheduledEnqueueTime, null); } /** * Sends a scheduled messages to the Azure Service Bus entity this sender is connected to. A scheduled message is * enqueued and made available to receivers only at the scheduled enqueue time. * * @param messages Messages to be sent to the Service Bus Queue. * @param scheduledEnqueueTime OffsetDateTime at which the messages should appear in the Service Bus queue or topic. * @param transactionContext Transaction to associate with the operation. * * @return Sequence numbers of the scheduled messages which can be used to cancel the messages. * * @throws NullPointerException If {@code messages}, {@code scheduledEnqueueTime}, {@code transactionContext} or * {@code transactionContext.transactionId} is {@code null}. * @throws ServiceBusException If the messages could not be scheduled. * @throws IllegalStateException if sender is already disposed. 
*/ public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessages"))); } if (Objects.isNull(messages)) { return fluxError(logger, new NullPointerException("'messages' cannot be null.")); } if (Objects.isNull(scheduledEnqueueTime)) { return fluxError(logger, new NullPointerException("'scheduledEnqueueTime' cannot be null.")); } return createMessageBatch() .map(messageBatch -> { int index = 0; for (ServiceBusMessage message : messages) { if (!messageBatch.tryAddMessage(message)) { final String error = String.format(Locale.US, "Messages exceed max allowed size for all the messages together. " + "Failed to add message at index '%s'.", index); throw logger.logExceptionAsError(new IllegalArgumentException(error)); } ++index; } return messageBatch; }) .flatMapMany(messageBatch -> connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMapMany(managementNode -> managementNode.schedule(messageBatch.getMessages(), scheduledEnqueueTime, messageBatch.getMaxSizeInBytes(), linkName.get(), transactionContext)) ); } /** * Cancels the enqueuing of a scheduled message, if it was not already enqueued. * * @param sequenceNumber of the scheduled message to cancel. * * @return The {@link Mono} that finishes this operation on service bus resource. * * @throws IllegalArgumentException if {@code sequenceNumber} is negative. * @throws ServiceBusException If the messages could not be cancelled. * @throws IllegalStateException if sender is already disposed. 
*/ public Mono<Void> cancelScheduledMessage(long sequenceNumber) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessage"))); } if (sequenceNumber < 0) { return monoError(logger, new IllegalArgumentException("'sequenceNumber' cannot be negative.")); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMap(managementNode -> managementNode.cancelScheduledMessages( Collections.singletonList(sequenceNumber), linkName.get())); } /** * Cancels the enqueuing of an already scheduled message, if it was not already enqueued. * * @param sequenceNumbers of the scheduled messages to cancel. * * @return The {@link Mono} that finishes this operation on service bus resource. * * @throws NullPointerException if {@code sequenceNumbers} is null. * @throws IllegalStateException if sender is already disposed. * @throws ServiceBusException if the scheduled messages cannot cancelled. */ public Mono<Void> cancelScheduledMessages(Iterable<Long> sequenceNumbers) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessages"))); } if (Objects.isNull(sequenceNumbers)) { return monoError(logger, new NullPointerException("'messages' cannot be null.")); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMap(managementNode -> managementNode.cancelScheduledMessages(sequenceNumbers, linkName.get())); } /** * Starts a new transaction on Service Bus. The {@link ServiceBusTransactionContext} should be passed along with * {@link ServiceBusReceivedMessage} all operations that needs to be in this transaction. * * @return A new {@link ServiceBusTransactionContext}. * * @throws IllegalStateException if sender is already disposed. * @throws ServiceBusException if a transaction cannot be created. 
* * @see ServiceBusReceiverAsyncClient */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "createTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())); } /** * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * * @param transactionContext to be committed. * * @return The {@link Mono} that finishes this operation on Service Bus resource. * * @throws IllegalStateException if sender is already disposed. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws ServiceBusException if the transaction could not be committed. * * @see ServiceBusReceiverAsyncClient */ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "commitTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * * @param transactionContext Transaction to rollback. * * @return The {@link Mono} that finishes this operation on the Service Bus resource. * * @throws IllegalStateException if sender is already disposed. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. 
* @throws ServiceBusException if the transaction could not be rolled back. * * @see ServiceBusReceiverAsyncClient */ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "rollbackTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Disposes of the {@link ServiceBusSenderAsyncClient}. If the client has a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } onClientClose.run(); } private Mono<Void> sendIterable(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transaction) { if (Objects.isNull(messages)) { return monoError(logger, new NullPointerException("'messages' cannot be null.")); } return createMessageBatch().flatMap(messageBatch -> { StreamSupport.stream(messages.spliterator(), false) .forEach(message -> messageBatch.tryAddMessage(message)); return sendInternal(messageBatch, transaction); }); } private Mono<Long> scheduleMessageInternal(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessage"))); } if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } if (Objects.isNull(scheduledEnqueueTime)) { return monoError(logger, new NullPointerException("'scheduledEnqueueTime' cannot be null.")); } return getSendLink() .flatMap(link -> link.getLinkSize().flatMap(size -> { int maxSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMap(managementNode -> managementNode.schedule(Arrays.asList(message), scheduledEnqueueTime, maxSize, link.getLinkName(), transactionContext) .next()); })); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} the finishes this operation on service bus resource. */ private Mono<Void> sendInternal(Flux<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessage"))); } return withRetry(getSendLink(), retryOptions, "Failed to create send link " + linkName) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateMessageBatchOptions batchOptions = new CreateMessageBatchOptions() .setMaximumSizeInBytes(batchSize); return messages.collect(new AmqpMessageCollector(batchOptions, 1, link::getErrorContext, tracerProvider, messageSerializer, entityName, link.getHostname())); }) .flatMap(list -> sendInternalBatch(Flux.fromIterable(list), transactionContext))) .onErrorMap(this::mapError); } private Mono<Void> sendInternalBatch(Flux<ServiceBusMessageBatch> eventBatches, ServiceBusTransactionContext transactionContext) { return eventBatches .flatMap(messageBatch -> sendInternal(messageBatch, transactionContext)) .then() .doOnError(error -> logger.error("Error sending batch.", error)); } private Mono<AmqpSendLink> getSendLink() { return connectionProcessor .flatMap(connection -> { if (!CoreUtils.isNullOrEmpty(viaEntityName)) { return connection.createSendLink("VIA-".concat(viaEntityName), viaEntityName, retryOptions, entityName); } else { return connection.createSendLink(entityName, entityName, retryOptions, null); } }) .doOnNext(next -> linkName.compareAndSet(null, next.getLinkName())); } private Throwable mapError(Throwable throwable) { if (!(throwable instanceof ServiceBusException)) { return new ServiceBusException(throwable, ServiceBusErrorSource.SEND); } return throwable; } private static class AmqpMessageCollector implements Collector<ServiceBusMessage, List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> { private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final MessageSerializer serializer; private final String entityPath; private final String hostname; private volatile ServiceBusMessageBatch currentBatch; AmqpMessageCollector(CreateMessageBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, MessageSerializer serializer, String 
entityPath, String hostname) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.serializer = serializer; this.entityPath = entityPath; this.hostname = hostname; currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider, serializer, entityPath, hostname); } @Override public Supplier<List<ServiceBusMessageBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<ServiceBusMessageBatch>, ServiceBusMessage> accumulator() { return (list, event) -> { ServiceBusMessageBatch batch = currentBatch; if (batch.tryAddMessage(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. '%s'", maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider, serializer, entityPath, hostname); currentBatch.tryAddMessage(event); list.add(batch); }; } @Override public BinaryOperator<List<ServiceBusMessageBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> finisher() { return list -> { ServiceBusMessageBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
/**
 * An <b>asynchronous</b> client to send messages to an Azure Service Bus queue or topic, schedule
 * (and cancel) message enqueuing, and manage AMQP transactions.
 *
 * <p>All operations return reactive types; nothing happens until the returned {@link Mono}/{@link Flux}
 * is subscribed to. The client is safe to share; {@link #close()} releases the underlying connection
 * through {@code onClientClose}.</p>
 */
class ServiceBusSenderAsyncClient implements AutoCloseable {
    /**
     * The default maximum allowable size, in bytes, for a batch to be sent.
     */
    static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024;
    private static final String TRANSACTION_LINK_NAME = "coordinator";
    private static final String AZ_TRACING_NAMESPACE_VALUE = "Microsoft.ServiceBus";
    private static final CreateMessageBatchOptions DEFAULT_BATCH_OPTIONS = new CreateMessageBatchOptions();
    private static final String SERVICE_BASE_NAME = "ServiceBus.";

    private final ClientLogger logger = new ClientLogger(ServiceBusSenderAsyncClient.class);
    // Name of the first send link this client opens; reused for management-node operations
    // (scheduling / cancelling) so the service can correlate them with this sender.
    private final AtomicReference<String> linkName = new AtomicReference<>();
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    private final AmqpRetryOptions retryOptions;
    private final AmqpRetryPolicy retryPolicy;
    private final MessagingEntityType entityType;
    private final Runnable onClientClose;
    private final String entityName;
    private final ServiceBusConnectionProcessor connectionProcessor;
    // When non-empty, messages are sent "via" this entity (transit entity) instead of directly.
    private final String viaEntityName;

    /**
     * Creates a new instance of this {@link ServiceBusSenderAsyncClient} that sends messages to a
     * Service Bus entity.
     */
    ServiceBusSenderAsyncClient(String entityName, MessagingEntityType entityType,
        ServiceBusConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions,
        TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose,
        String viaEntityName) {
        // Caching the created link so we don't invoke another link creation.
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
        this.entityName = Objects.requireNonNull(entityName, "'entityPath' cannot be null.");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.tracerProvider = tracerProvider;
        this.retryPolicy = getRetryPolicy(retryOptions);
        this.entityType = entityType;
        this.viaEntityName = viaEntityName;
        this.onClientClose = onClientClose;
    }

    /**
     * Gets the fully qualified namespace.
     *
     * @return The fully qualified namespace.
     */
    public String getFullyQualifiedNamespace() {
        return connectionProcessor.getFullyQualifiedNamespace();
    }

    /**
     * Gets the name of the Service Bus resource.
     *
     * @return The name of the Service Bus resource.
     */
    public String getEntityPath() {
        return entityName;
    }

    /**
     * Sends a message to a Service Bus queue or topic.
     *
     * @param message Message to be sent to Service Bus queue or topic.
     *
     * @return The {@link Mono} that finishes this operation on the Service Bus resource.
     *
     * @throws NullPointerException if {@code message} is {@code null}.
     * @throws IllegalStateException if sender is already disposed.
     * @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message
     *     or the message could not be sent.
     */
    public Mono<Void> sendMessage(ServiceBusMessage message) {
        if (Objects.isNull(message)) {
            return monoError(logger, new NullPointerException("'message' cannot be null."));
        }
        return sendInternal(Flux.just(message), null);
    }

    /**
     * Sends a message to a Service Bus queue or topic within the given transaction.
     *
     * @param message Message to be sent to Service Bus queue or topic.
     * @param transactionContext to be set on the message before sending to Service Bus.
     *
     * @return The {@link Mono} that finishes this operation on the Service Bus resource.
     *
     * @throws NullPointerException if {@code message}, {@code transactionContext} or
     *     {@code transactionContext.transactionId} is {@code null}.
     * @throws IllegalStateException if sender is already disposed.
     * @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message
     *     or the message could not be sent.
     */
    public Mono<Void> sendMessage(ServiceBusMessage message, ServiceBusTransactionContext transactionContext) {
        // FIX: the documented NullPointerException for a null message is now surfaced through the
        // returned Mono (consistent with the single-argument overload) instead of leaking out of
        // Flux.just(null) synchronously.
        if (Objects.isNull(message)) {
            return monoError(logger, new NullPointerException("'message' cannot be null."));
        }
        if (Objects.isNull(transactionContext)) {
            return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
        }
        if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }
        return sendInternal(Flux.just(message), transactionContext);
    }

    /**
     * Sends a set of messages to a Service Bus queue or topic using a batched approach. If the messages
     * exceed the maximum size of a single batch, the send fails with an error. By default, the message
     * size is the max amount allowed on the link.
     *
     * @param messages Messages to be sent to Service Bus queue or topic.
     * @param transactionContext to be set on the batch message before sending to Service Bus.
     *
     * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource.
     *
     * @throws NullPointerException if {@code messages}, {@code transactionContext} or
     *     {@code transactionContext.transactionId} is {@code null}.
     * @throws IllegalStateException if sender is already disposed.
     * @throws ServiceBusException if {@code messages} are larger than the maximum allowed size of a single
     *     batch or the messages could not be sent.
     */
    public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages,
        ServiceBusTransactionContext transactionContext) {
        if (Objects.isNull(transactionContext)) {
            return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
        }
        if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }
        return sendIterable(messages, transactionContext);
    }

    /**
     * Sends a set of messages to a Service Bus queue or topic using a batched approach. If the messages
     * exceed the maximum size of a single batch, the send fails with an error. By default, the message
     * size is the max amount allowed on the link.
     *
     * @param messages Messages to be sent to Service Bus queue or topic.
     *
     * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource.
     *
     * @throws NullPointerException if {@code messages} is {@code null}.
     * @throws ServiceBusException if {@code messages} are larger than the maximum allowed size of a single
     *     batch or the messages could not be sent.
     * @throws IllegalStateException if sender is already disposed.
     */
    public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages) {
        return sendIterable(messages, null);
    }

    /**
     * Sends a message batch to the Azure Service Bus entity this sender is connected to.
     *
     * @param batch of messages which allows client to send maximum allowed size for a batch of messages.
     *
     * @return A {@link Mono} that finishes this operation on the Service Bus resource.
     *
     * @throws NullPointerException if {@code batch} is {@code null}.
     * @throws ServiceBusException if the message batch could not be sent.
     * @throws IllegalStateException if sender is already disposed.
     */
    public Mono<Void> sendMessages(ServiceBusMessageBatch batch) {
        return sendInternal(batch, null);
    }

    /**
     * Sends a message batch to the Azure Service Bus entity this sender is connected to, within the
     * given transaction.
     *
     * @param batch of messages which allows client to send maximum allowed size for a batch of messages.
     * @param transactionContext to be set on the batch message before sending to Service Bus.
     *
     * @return A {@link Mono} that finishes this operation on the Service Bus resource.
     *
     * @throws NullPointerException if {@code batch}, {@code transactionContext} or
     *     {@code transactionContext.transactionId} is {@code null}.
     * @throws ServiceBusException if the message batch could not be sent.
     * @throws IllegalStateException if sender is already disposed.
     */
    public Mono<Void> sendMessages(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) {
        if (Objects.isNull(transactionContext)) {
            return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
        }
        if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }
        return sendInternal(batch, transactionContext);
    }

    /**
     * Creates a {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows.
     *
     * @return A {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows.
     * @throws ServiceBusException if the message batch could not be created.
     * @throws IllegalStateException if sender is already disposed.
     */
    public Mono<ServiceBusMessageBatch> createMessageBatch() {
        return createMessageBatch(DEFAULT_BATCH_OPTIONS);
    }

    /**
     * Creates an {@link ServiceBusMessageBatch} configured with the options specified.
     *
     * @param options A set of options used to configure the {@link ServiceBusMessageBatch}.
     *
     * @return A new {@link ServiceBusMessageBatch} configured with the given options.
     * @throws NullPointerException if {@code options} is null.
     * @throws ServiceBusException if the message batch could not be created.
     * @throws IllegalStateException if sender is already disposed.
     * @throws IllegalArgumentException if {@link CreateMessageBatchOptions#getMaximumSizeInBytes()} is larger
     *     than the maximum allowed size of the link.
     */
    public Mono<ServiceBusMessageBatch> createMessageBatch(CreateMessageBatchOptions options) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "createMessageBatch")));
        }
        if (Objects.isNull(options)) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        final int maxSize = options.getMaximumSizeInBytes();

        return getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> {
            // A non-positive link size means the service did not report one; fall back to the default.
            final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;

            if (maxSize > maximumLinkSize) {
                return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                    "CreateMessageBatchOptions.getMaximumSizeInBytes (%s bytes) is larger than the link size"
                        + " (%s bytes).", maxSize, maximumLinkSize)));
            }

            final int batchSize = maxSize > 0 ? maxSize : maximumLinkSize;
            return Mono.just(new ServiceBusMessageBatch(batchSize, link::getErrorContext, tracerProvider,
                messageSerializer, entityName, getFullyQualifiedNamespace()));
        })).onErrorMap(this::mapError);
    }

    /**
     * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled
     * message is enqueued and made available to receivers only at the scheduled enqueue time.
     *
     * @param message Message to be sent to the Service Bus Queue.
     * @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus
     *     queue or topic.
     * @param transactionContext to be set on the message before sending to Service Bus.
     *
     * @return The sequence number of the scheduled message which can be used to cancel the scheduling.
     *
     * @throws NullPointerException if {@code message}, {@code scheduledEnqueueTime},
     *     {@code transactionContext} or {@code transactionContext.transactionId} is {@code null}.
     * @throws ServiceBusException If the message could not be scheduled.
     * @throws IllegalStateException if sender is already disposed.
     */
    public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime,
        ServiceBusTransactionContext transactionContext) {
        if (Objects.isNull(transactionContext)) {
            return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
        }
        if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }
        return scheduleMessageInternal(message, scheduledEnqueueTime, transactionContext);
    }

    /**
     * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled
     * message is enqueued and made available to receivers only at the scheduled enqueue time.
     *
     * @param message Message to be sent to the Service Bus Queue.
     * @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus
     *     queue or topic.
     *
     * @return The sequence number of the scheduled message which can be used to cancel the scheduling.
     *
     * @throws NullPointerException if {@code message} or {@code scheduledEnqueueTime} is {@code null}.
     * @throws ServiceBusException If the message could not be scheduled.
     */
    public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime) {
        return scheduleMessageInternal(message, scheduledEnqueueTime, null);
    }

    /**
     * Sends a batch of scheduled messages to the Azure Service Bus entity this sender is connected to.
     * A scheduled message is enqueued and made available to receivers only at the scheduled enqueue time.
     *
     * @param messages Messages to be sent to the Service Bus queue or topic.
     * @param scheduledEnqueueTime OffsetDateTime at which the messages should appear in the Service Bus
     *     queue or topic.
     *
     * @return Sequence numbers of the scheduled messages which can be used to cancel the messages.
     *
     * @throws NullPointerException If {@code messages} or {@code scheduledEnqueueTime} is {@code null}.
     * @throws ServiceBusException If the messages could not be scheduled.
     * @throws IllegalStateException if sender is already disposed.
     */
    public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime) {
        return scheduleMessages(messages, scheduledEnqueueTime, null);
    }

    /**
     * Sends scheduled messages to the Azure Service Bus entity this sender is connected to. A scheduled
     * message is enqueued and made available to receivers only at the scheduled enqueue time.
     *
     * @param messages Messages to be sent to the Service Bus Queue.
     * @param scheduledEnqueueTime OffsetDateTime at which the messages should appear in the Service Bus
     *     queue or topic.
     * @param transactionContext Transaction to associate with the operation.
     *
     * @return Sequence numbers of the scheduled messages which can be used to cancel the messages.
     *
     * @throws NullPointerException If {@code messages}, {@code scheduledEnqueueTime},
     *     {@code transactionContext} or {@code transactionContext.transactionId} is {@code null}.
     * @throws ServiceBusException If the messages could not be scheduled.
     * @throws IllegalStateException if sender is already disposed.
     */
    public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime,
        ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return fluxError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessages")));
        }
        if (Objects.isNull(messages)) {
            return fluxError(logger, new NullPointerException("'messages' cannot be null."));
        }
        if (Objects.isNull(scheduledEnqueueTime)) {
            return fluxError(logger, new NullPointerException("'scheduledEnqueueTime' cannot be null."));
        }

        return createMessageBatch()
            .map(messageBatch -> {
                // All messages must fit in a single batch; the index in the error pinpoints the
                // first message that did not fit.
                int index = 0;
                for (ServiceBusMessage message : messages) {
                    if (!messageBatch.tryAddMessage(message)) {
                        final String error = String.format(Locale.US,
                            "Messages exceed max allowed size for all the messages together. "
                                + "Failed to add message at index '%s'.", index);
                        throw logger.logExceptionAsError(new IllegalArgumentException(error));
                    }
                    ++index;
                }
                return messageBatch;
            })
            .flatMapMany(messageBatch -> connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityName, entityType))
                .flatMapMany(managementNode -> managementNode.schedule(messageBatch.getMessages(),
                    scheduledEnqueueTime, messageBatch.getMaxSizeInBytes(), linkName.get(), transactionContext))
            );
    }

    /**
     * Cancels the enqueuing of a scheduled message, if it was not already enqueued.
     *
     * @param sequenceNumber of the scheduled message to cancel.
     *
     * @return The {@link Mono} that finishes this operation on the Service Bus resource.
     *
     * @throws IllegalArgumentException if {@code sequenceNumber} is negative.
     * @throws ServiceBusException If the message could not be cancelled.
     * @throws IllegalStateException if sender is already disposed.
     */
    public Mono<Void> cancelScheduledMessage(long sequenceNumber) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessage")));
        }
        if (sequenceNumber < 0) {
            return monoError(logger, new IllegalArgumentException("'sequenceNumber' cannot be negative."));
        }
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityName, entityType))
            .flatMap(managementNode -> managementNode.cancelScheduledMessages(
                Collections.singletonList(sequenceNumber), linkName.get()));
    }

    /**
     * Cancels the enqueuing of already scheduled messages, if they were not already enqueued.
     *
     * @param sequenceNumbers of the scheduled messages to cancel.
     *
     * @return The {@link Mono} that finishes this operation on the Service Bus resource.
     *
     * @throws NullPointerException if {@code sequenceNumbers} is null.
     * @throws IllegalStateException if sender is already disposed.
     * @throws ServiceBusException if the scheduled messages cannot be cancelled.
     */
    public Mono<Void> cancelScheduledMessages(Iterable<Long> sequenceNumbers) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessages")));
        }
        if (Objects.isNull(sequenceNumbers)) {
            // FIX: the error message previously said "'messages' cannot be null.", which does not
            // match the parameter being validated.
            return monoError(logger, new NullPointerException("'sequenceNumbers' cannot be null."));
        }
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityName, entityType))
            .flatMap(managementNode -> managementNode.cancelScheduledMessages(sequenceNumbers, linkName.get()));
    }

    /**
     * Starts a new transaction on Service Bus. The {@link ServiceBusTransactionContext} should be passed
     * along with {@link ServiceBusReceivedMessage} to all operations that need to be in this transaction.
     *
     * @return A new {@link ServiceBusTransactionContext}.
     *
     * @throws IllegalStateException if sender is already disposed.
     * @throws ServiceBusException if a transaction cannot be created.
     *
     * @see ServiceBusReceiverAsyncClient
     */
    public Mono<ServiceBusTransactionContext> createTransaction() {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "createTransaction")));
        }
        return connectionProcessor
            .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
            .flatMap(transactionSession -> transactionSession.createTransaction())
            .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId()));
    }

    /**
     * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to
     * Service Bus.
     *
     * @param transactionContext to be committed.
     *
     * @return The {@link Mono} that finishes this operation on the Service Bus resource.
     *
     * @throws IllegalStateException if sender is already disposed.
     * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId}
     *     is null.
     * @throws ServiceBusException if the transaction could not be committed.
     *
     * @see ServiceBusReceiverAsyncClient
     */
    public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "commitTransaction")));
        }
        // FIX: the NullPointerException documented on this method is now raised eagerly through
        // the returned Mono instead of surfacing from inside the reactive pipeline.
        if (Objects.isNull(transactionContext)) {
            return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
        }
        if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }
        return connectionProcessor
            .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
            .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction(
                transactionContext.getTransactionId())));
    }

    /**
     * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to
     * Service Bus.
     *
     * @param transactionContext Transaction to rollback.
     *
     * @return The {@link Mono} that finishes this operation on the Service Bus resource.
     *
     * @throws IllegalStateException if sender is already disposed.
     * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId}
     *     is null.
     * @throws ServiceBusException if the transaction could not be rolled back.
     *
     * @see ServiceBusReceiverAsyncClient
     */
    public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "rollbackTransaction")));
        }
        // FIX: same eager null validation as commitTransaction, matching the documented contract.
        if (Objects.isNull(transactionContext)) {
            return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
        }
        if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }
        return connectionProcessor
            .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
            .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction(
                transactionContext.getTransactionId())));
    }

    /**
     * Disposes of the {@link ServiceBusSenderAsyncClient}. If the client has a dedicated connection, the
     * underlying connection is also closed.
     */
    @Override
    public void close() {
        // getAndSet makes close() idempotent: only the first caller runs onClientClose.
        if (isDisposed.getAndSet(true)) {
            return;
        }
        onClientClose.run();
    }

    /**
     * Batches and sends an iterable of messages.
     *
     * <p>BUG FIX: the previous implementation added each message with
     * {@code messageBatch.tryAddMessage(message)} and ignored the boolean result, silently dropping
     * every message that did not fit into a single batch. Routing through
     * {@link #sendInternal(Flux, ServiceBusTransactionContext)} (which batches with a maximum of one
     * batch) surfaces an error instead of losing data.</p>
     */
    private Mono<Void> sendIterable(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transaction) {
        if (Objects.isNull(messages)) {
            return monoError(logger, new NullPointerException("'messages' cannot be null."));
        }
        return sendInternal(Flux.fromIterable(messages), transaction);
    }

    /**
     * Schedules a single message through the management node, sizing it against the send link.
     */
    private Mono<Long> scheduleMessageInternal(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime,
        ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessage")));
        }
        if (Objects.isNull(message)) {
            return monoError(logger, new NullPointerException("'message' cannot be null."));
        }
        if (Objects.isNull(scheduledEnqueueTime)) {
            return monoError(logger, new NullPointerException("'scheduledEnqueueTime' cannot be null."));
        }
        return getSendLink()
            .flatMap(link -> link.getLinkSize().flatMap(size -> {
                int maxSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                return connectionProcessor
                    .flatMap(connection -> connection.getManagementNode(entityName, entityType))
                    .flatMap(managementNode -> managementNode.schedule(Arrays.asList(message),
                        scheduledEnqueueTime, maxSize, link.getLinkName(), transactionContext)
                        .next());
            }));
    }

    /**
     * Collects the given messages into at most one batch (erroring if they do not fit) and sends it.
     *
     * @param messages Messages to batch and send.
     * @param transactionContext to be set on the batch message before sending to Service Bus.
     *
     * @return A {@link Mono} that finishes this operation on the Service Bus resource.
     */
    private Mono<Void> sendInternal(Flux<ServiceBusMessage> messages,
        ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessage")));
        }
        return withRetry(getSendLink(), retryOptions, "Failed to create send link " + linkName)
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                    final CreateMessageBatchOptions batchOptions = new CreateMessageBatchOptions()
                        .setMaximumSizeInBytes(batchSize);
                    // maxNumberOfBatches = 1: everything must fit in a single batch or the collector
                    // throws LINK_PAYLOAD_SIZE_EXCEEDED.
                    return messages.collect(new AmqpMessageCollector(batchOptions, 1,
                        link::getErrorContext, tracerProvider, messageSerializer, entityName,
                        link.getHostname()));
                })
                .flatMap(list -> sendInternalBatch(Flux.fromIterable(list), transactionContext)))
            .onErrorMap(this::mapError);
    }

    /**
     * Sends an assembled {@link ServiceBusMessageBatch} over the send link, optionally within a
     * transaction.
     *
     * <p>NOTE(review): this overload is referenced by {@code sendMessages(ServiceBusMessageBatch)} and
     * {@code sendInternalBatch} but was missing from this file; it has been reconstructed. Confirm it
     * against the original SDK implementation (tracing instrumentation was not restored).</p>
     */
    private Mono<Void> sendInternal(ServiceBusMessageBatch batch,
        ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessage")));
        }
        if (Objects.isNull(batch)) {
            return monoError(logger, new NullPointerException("'batch' cannot be null."));
        }
        if (batch.getMessages().isEmpty()) {
            logger.info("Cannot send an empty batch of messages.");
            return Mono.empty();
        }
        return withRetry(getSendLink(), retryOptions, "Failed to create send link " + linkName)
            .flatMap(link -> {
                final java.util.List<org.apache.qpid.proton.message.Message> messages =
                    batch.getMessages().stream()
                        .map(messageSerializer::serialize)
                        .collect(java.util.stream.Collectors.toList());
                if (transactionContext != null && transactionContext.getTransactionId() != null) {
                    final org.apache.qpid.proton.amqp.transaction.TransactionalState deliveryState =
                        new org.apache.qpid.proton.amqp.transaction.TransactionalState();
                    deliveryState.setTxnId(org.apache.qpid.proton.amqp.Binary.create(
                        transactionContext.getTransactionId()));
                    return messages.size() == 1
                        ? link.send(messages.get(0), deliveryState)
                        : link.send(messages, deliveryState);
                }
                return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages);
            })
            .onErrorMap(this::mapError);
    }

    /**
     * Sends each batch in the stream, logging (and propagating) the first failure.
     */
    private Mono<Void> sendInternalBatch(Flux<ServiceBusMessageBatch> eventBatches,
        ServiceBusTransactionContext transactionContext) {
        return eventBatches
            .flatMap(messageBatch -> sendInternal(messageBatch, transactionContext))
            .then()
            .doOnError(error -> logger.error("Error sending batch.", error));
    }

    /**
     * Gets (or creates) the send link, routing through the via-entity when one is configured, and
     * caches the first link name for management-node correlation.
     */
    private Mono<AmqpSendLink> getSendLink() {
        return connectionProcessor
            .flatMap(connection -> {
                if (!CoreUtils.isNullOrEmpty(viaEntityName)) {
                    return connection.createSendLink("VIA-".concat(viaEntityName), viaEntityName,
                        retryOptions, entityName);
                } else {
                    return connection.createSendLink(entityName, entityName, retryOptions, null);
                }
            })
            .doOnNext(next -> linkName.compareAndSet(null, next.getLinkName()));
    }

    /**
     * Wraps unexpected errors as {@link ServiceBusException} with a SEND error source.
     */
    private Throwable mapError(Throwable throwable) {
        if (!(throwable instanceof ServiceBusException)) {
            return new ServiceBusException(throwable, ServiceBusErrorSource.SEND);
        }
        return throwable;
    }

    /**
     * Collects {@link ServiceBusMessage}s into {@link ServiceBusMessageBatch}es, throwing
     * LINK_PAYLOAD_SIZE_EXCEEDED when more than {@code maxNumberOfBatches} would be required.
     * Not a concurrent collector (characteristics are empty; {@code currentBatch} is per-pipeline).
     */
    private static class AmqpMessageCollector
        implements Collector<ServiceBusMessage, List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> {
        private final int maxMessageSize;
        private final Integer maxNumberOfBatches;
        private final ErrorContextProvider contextProvider;
        private final TracerProvider tracerProvider;
        private final MessageSerializer serializer;
        private final String entityPath;
        private final String hostname;

        private volatile ServiceBusMessageBatch currentBatch;

        AmqpMessageCollector(CreateMessageBatchOptions options, Integer maxNumberOfBatches,
            ErrorContextProvider contextProvider, TracerProvider tracerProvider, MessageSerializer serializer,
            String entityPath, String hostname) {
            this.maxNumberOfBatches = maxNumberOfBatches;
            this.maxMessageSize = options.getMaximumSizeInBytes() > 0
                ? options.getMaximumSizeInBytes()
                : MAX_MESSAGE_LENGTH_BYTES;
            this.contextProvider = contextProvider;
            this.tracerProvider = tracerProvider;
            this.serializer = serializer;
            this.entityPath = entityPath;
            this.hostname = hostname;
            currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider,
                serializer, entityPath, hostname);
        }

        @Override
        public Supplier<List<ServiceBusMessageBatch>> supplier() {
            return ArrayList::new;
        }

        @Override
        public BiConsumer<List<ServiceBusMessageBatch>, ServiceBusMessage> accumulator() {
            return (list, event) -> {
                ServiceBusMessageBatch batch = currentBatch;
                if (batch.tryAddMessage(event)) {
                    return;
                }
                // The current batch is full; starting a new one would exceed the allowed batch count.
                if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
                    final String message = String.format(Locale.US,
                        "EventData does not fit into maximum number of batches. '%s'", maxNumberOfBatches);
                    throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
                        contextProvider.getErrorContext());
                }
                currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider,
                    serializer, entityPath, hostname);
                currentBatch.tryAddMessage(event);
                list.add(batch);
            };
        }

        @Override
        public BinaryOperator<List<ServiceBusMessageBatch>> combiner() {
            return (existing, another) -> {
                existing.addAll(another);
                return existing;
            };
        }

        @Override
        public Function<List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> finisher() {
            return list -> {
                ServiceBusMessageBatch batch = currentBatch;
                currentBatch = null;
                if (batch != null) {
                    list.add(batch);
                }
                return list;
            };
        }

        @Override
        public Set<Characteristics> characteristics() {
            return Collections.emptySet();
        }
    }
}
Use try-with-resources instead of try/finally so that streams and other AutoCloseable resources are closed automatically, even when an exception is thrown.
/**
 * Lazily builds the MSAL {@link ConfidentialClientApplication} used for service-principal
 * authentication. Deferred so validation and construction happen per subscription.
 * <p>
 * Credential selection order: client secret, then certificate (PEM when no password is set,
 * PFX stream otherwise), then a client-assertion supplier; absence of all three is an error.
 * Also wires the HTTP pipeline or proxy, optional executor service, persistent token cache,
 * and regional authority before building.
 */
private Mono<ConfidentialClientApplication> getConfidentialClientApplication() {
    return Mono.defer(() -> {
        if (clientId == null) {
            return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException(
                "A non-null value for client ID must be provided for user authentication.")));
        }
        // Normalize the authority host (strip trailing slashes) and append the tenant.
        String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("")
            + "/" + tenantId;
        IClientCredential credential;
        if (clientSecret != null) {
            credential = ClientCredentialFactory.createFromSecret(clientSecret);
        } else if (certificate != null || certificatePath != null) {
            try {
                if (certificatePassword == null) {
                    // No password => PEM: parse public certificate(s) and private key directly.
                    byte[] pemCertificateBytes = getCertificateBytes();
                    List<X509Certificate> x509CertificateList =
                        CertificateUtil.publicKeyFromPem(pemCertificateBytes);
                    PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes);
                    if (x509CertificateList.size() == 1) {
                        credential = ClientCredentialFactory.createFromCertificate(
                            privateKey, x509CertificateList.get(0));
                    } else {
                        // Multiple certs in the PEM => treat as a certificate chain.
                        credential = ClientCredentialFactory.createFromCertificateChain(
                            privateKey, x509CertificateList);
                    }
                } else {
                    // Password present => PFX; try-with-resources guarantees the stream closes.
                    try (InputStream pfxCertificateStream = getCertificateInputStream()) {
                        credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream,
                            certificatePassword);
                    }
                }
            } catch (IOException | GeneralSecurityException e) {
                return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(
                    "Failed to parse the certificate for the credential: " + e.getMessage(), e)));
            }
        } else if (clientAssertionSupplier != null) {
            credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get());
        } else {
            return Mono.error(LOGGER.logExceptionAsError(
                new IllegalArgumentException("Must provide client secret or client certificate path."
                    + " To mitigate this issue, please refer to the troubleshooting guidelines here at "
                    // NOTE(review): the aka.ms URL and the closing tokens of this literal were
                    // truncated during extraction — restore them from the original file.
                    + "https: }
        ConfidentialClientApplication.Builder applicationBuilder =
            ConfidentialClientApplication.builder(clientId, credential);
        try {
            applicationBuilder = applicationBuilder.authority(authorityUrl)
                .validateAuthority(options.getAuthorityValidation());
        } catch (MalformedURLException e) {
            return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e)));
        }
        applicationBuilder.sendX5c(options.isIncludeX5c());
        initializeHttpPipelineAdapter();
        // Prefer the Azure SDK HTTP pipeline; fall back to a java.net proxy when absent.
        if (httpPipelineAdapter != null) {
            applicationBuilder.httpClient(httpPipelineAdapter);
        } else {
            applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        if (options.getExecutorService() != null) {
            applicationBuilder.executorService(options.getExecutorService());
        }
        TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions();
        PersistentTokenCacheImpl tokenCache = null;
        if (tokenCachePersistenceOptions != null) {
            try {
                tokenCache = new PersistentTokenCacheImpl()
                    .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed())
                    .setName(tokenCachePersistenceOptions.getName());
                applicationBuilder.setTokenCacheAccessAspect(tokenCache);
            } catch (Throwable t) {
                // Throwable: persistence may fail with linkage errors on unsupported platforms.
                return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException(
                    "Shared token cache is unavailable in this environment.", null, t)));
            }
        }
        if (options.getRegionalAuthority() != null) {
            if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) {
                applicationBuilder.autoDetectRegion(true);
            } else {
                applicationBuilder.azureRegion(options.getRegionalAuthority().toString());
            }
        }
        ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build();
        // When a persistent cache is used, register it before handing out the application.
        return tokenCache != null ? tokenCache.registerCache()
            .map(ignored -> confidentialClientApplication)
            : Mono.just(confidentialClientApplication);
    });
}
try (InputStream pfxCertificateStream = getCertificateInputStream()) {
/**
 * Lazily builds the MSAL {@link ConfidentialClientApplication} used for service-principal
 * authentication. Deferred so validation and construction happen per subscription.
 * <p>
 * Credential selection order: client secret, then certificate (PEM when no password is set,
 * PFX stream otherwise), then a client-assertion supplier; absence of all three is an error.
 * Also wires the HTTP pipeline or proxy, optional executor service, persistent token cache,
 * and regional authority before building.
 * NOTE(review): this method appears twice in this chunk (duplicate of the earlier copy);
 * confirm only one definition exists in the real source file.
 */
private Mono<ConfidentialClientApplication> getConfidentialClientApplication() {
    return Mono.defer(() -> {
        if (clientId == null) {
            return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException(
                "A non-null value for client ID must be provided for user authentication.")));
        }
        // Normalize the authority host (strip trailing slashes) and append the tenant.
        String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("")
            + "/" + tenantId;
        IClientCredential credential;
        if (clientSecret != null) {
            credential = ClientCredentialFactory.createFromSecret(clientSecret);
        } else if (certificate != null || certificatePath != null) {
            try {
                if (certificatePassword == null) {
                    // No password => PEM: parse public certificate(s) and private key directly.
                    byte[] pemCertificateBytes = getCertificateBytes();
                    List<X509Certificate> x509CertificateList =
                        CertificateUtil.publicKeyFromPem(pemCertificateBytes);
                    PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes);
                    if (x509CertificateList.size() == 1) {
                        credential = ClientCredentialFactory.createFromCertificate(
                            privateKey, x509CertificateList.get(0));
                    } else {
                        // Multiple certs in the PEM => treat as a certificate chain.
                        credential = ClientCredentialFactory.createFromCertificateChain(
                            privateKey, x509CertificateList);
                    }
                } else {
                    // Password present => PFX; try-with-resources guarantees the stream closes.
                    try (InputStream pfxCertificateStream = getCertificateInputStream()) {
                        credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream,
                            certificatePassword);
                    }
                }
            } catch (IOException | GeneralSecurityException e) {
                return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(
                    "Failed to parse the certificate for the credential: " + e.getMessage(), e)));
            }
        } else if (clientAssertionSupplier != null) {
            credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get());
        } else {
            return Mono.error(LOGGER.logExceptionAsError(
                new IllegalArgumentException("Must provide client secret or client certificate path."
                    + " To mitigate this issue, please refer to the troubleshooting guidelines here at "
                    // NOTE(review): the aka.ms URL and the closing tokens of this literal were
                    // truncated during extraction — restore them from the original file.
                    + "https: }
        ConfidentialClientApplication.Builder applicationBuilder =
            ConfidentialClientApplication.builder(clientId, credential);
        try {
            applicationBuilder = applicationBuilder.authority(authorityUrl)
                .validateAuthority(options.getAuthorityValidation());
        } catch (MalformedURLException e) {
            return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e)));
        }
        applicationBuilder.sendX5c(options.isIncludeX5c());
        initializeHttpPipelineAdapter();
        // Prefer the Azure SDK HTTP pipeline; fall back to a java.net proxy when absent.
        if (httpPipelineAdapter != null) {
            applicationBuilder.httpClient(httpPipelineAdapter);
        } else {
            applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        if (options.getExecutorService() != null) {
            applicationBuilder.executorService(options.getExecutorService());
        }
        TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions();
        PersistentTokenCacheImpl tokenCache = null;
        if (tokenCachePersistenceOptions != null) {
            try {
                tokenCache = new PersistentTokenCacheImpl()
                    .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed())
                    .setName(tokenCachePersistenceOptions.getName());
                applicationBuilder.setTokenCacheAccessAspect(tokenCache);
            } catch (Throwable t) {
                // Throwable: persistence may fail with linkage errors on unsupported platforms.
                return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException(
                    "Shared token cache is unavailable in this environment.", null, t)));
            }
        }
        if (options.getRegionalAuthority() != null) {
            if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) {
                applicationBuilder.autoDetectRegion(true);
            } else {
                applicationBuilder.azureRegion(options.getRegionalAuthority().toString());
            }
        }
        ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build();
        // When a persistent cache is used, register it before handing out the application.
        return tokenCache != null ? tokenCache.registerCache()
            .map(ignored -> confidentialClientApplication)
            : Mono.just(confidentialClientApplication);
    });
}
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final 
String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." 
)); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = 
builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) 
.build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." 
+ " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. 
To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. 
To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? 
new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. 
It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. 
* * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * 
Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. 
" + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
                                                         URI redirectUrl) {
    AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder =
        AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
            .scopes(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));

    if (request.getClaims() != null) {
        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
        parametersBuilder.claims(customClaimRequest);
    }

    Mono<IAuthenticationResult> acquireToken;
    // A configured client secret implies a confidential client; otherwise redeem the
    // authorization code as a public client.
    if (clientSecret != null) {
        acquireToken = confidentialClientApplicationAccessor.getValue()
            .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build())));
    } else {
        acquireToken = publicClientApplicationAccessor.getValue()
            .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build())));
    }
    return acquireToken.onErrorMap(t -> new ClientAuthenticationException(
        "Failed to acquire token with authorization code", null, t)).map(MsalToken::new);
}

/**
 * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
 * credential will run a minimal local HttpServer at the given port, so {@code http://localhost:{port}} must be
 * listed as a valid reply URL for the application.
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @param redirectUrl the redirect URL to listen on and receive security code
* @param loginHint the username suggestion to pre-fill the login page's username/email address field
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
                                                          String redirectUrl, String loginHint) {
    URI redirectUri;
    String redirect;
    // Precedence: explicit port on localhost, then an explicit redirect URL, then plain localhost.
    if (port != null) {
        redirect = HTTP_LOCALHOST + ":" + port;
    } else if (redirectUrl != null) {
        redirect = redirectUrl;
    } else {
        redirect = HTTP_LOCALHOST;
    }

    try {
        redirectUri = new URI(redirect);
    } catch (URISyntaxException e) {
        return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e)));
    }
    InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
        InteractiveRequestParameters.builder(redirectUri)
            .scopes(new HashSet<>(request.getScopes()))
            .prompt(Prompt.SELECT_ACCOUNT)
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));

    if (request.getClaims() != null) {
        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
        builder.claims(customClaimRequest);
    }

    if (loginHint != null) {
        builder.loginHint(loginHint);
    }

    Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue()
        .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build())));

    return acquireToken.onErrorMap(t -> new ClientAuthenticationException(
        "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new);
}

/**
 * Gets token from shared token cache.
 *
 * @param request the details of the token request
 * @param username the username to match a single cached account against, or null
 * @return a Publisher that emits a MsalToken
 */
public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) {
    return publicClientApplicationAccessor.getValue()
        .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts())
            .onErrorMap(t -> new CredentialUnavailableException(
                "Cannot get accounts from token cache. Error: " + t.getMessage(), t))
            .flatMap(set -> {
                IAccount requestedAccount;
                // De-duplicate cached accounts by home account id, optionally filtered by username.
                Map<String, IAccount> accounts = new HashMap<>();

                if (set.isEmpty()) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException("SharedTokenCacheCredential "
                            + "authentication unavailable. No accounts were found in the cache.")));
                }

                for (IAccount cached : set) {
                    if (username == null || username.equals(cached.username())) {
                        if (!accounts.containsKey(cached.homeAccountId())) {
                            accounts.put(cached.homeAccountId(), cached);
                        }
                    }
                }

                if (accounts.isEmpty()) {
                    // No cached account matched the username filter.
                    return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                        + "authentication unavailable. No account matching the specified username: %s was "
                        + "found in the cache.", username)));
                } else if (accounts.size() > 1) {
                    if (username == null) {
                        return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication "
                            + "unavailable. Multiple accounts were found in the cache. Use username and "
                            + "tenant id to disambiguate."));
                    } else {
                        return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                            + "authentication unavailable. Multiple accounts matching the specified username: "
                            + "%s were found in the cache.", username)));
                    }
                } else {
                    requestedAccount = accounts.values().iterator().next();
                }
                // Exactly one candidate: perform a silent/cached acquisition for it.
                return authenticateWithPublicClientCache(request, requestedAccount);
            }));
}

/**
 * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint.
* @param identityEndpoint the Identity endpoint to acquire token from
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint,
                                                                  TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        HttpURLConnection connection = null;
        StringBuilder payload = new StringBuilder();

        payload.append("resource=");
        payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()),
            StandardCharsets.UTF_8.name()));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name()));
        URL url = new URL(String.format("%s?%s", identityEndpoint, payload));

        String secretKey = null;
        try {
            // First, unauthenticated request: Azure Arc is EXPECTED to reply 401 with a
            // WWW-Authenticate header whose realm points at the secret-key file on disk.
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            connection.setRequestProperty("Metadata", "true");
            connection.connect();
            // Reading the stream forces the 401 to surface as the IOException handled below;
            // the Scanner itself is intentionally discarded.
            new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A");
        } catch (IOException e) {
            if (connection == null) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize "
                    + "Http URL connection to the endpoint.", null, e));
            }
            int status = connection.getResponseCode();
            if (status != 401) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401"
                    + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status),
                    null, e));
            }
            String realm = connection.getHeaderField("WWW-Authenticate");
            if (realm == null) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value"
                    + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint",
                    null));
            }
            int separatorIndex = realm.indexOf("=");
            if (separatorIndex == -1) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value"
                    + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint",
                    null));
            }
            // Everything after '=' in the realm is the path of the secret-key file.
            String secretKeyPath = realm.substring(separatorIndex + 1);
            secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }

        if (secretKey == null) {
            throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value"
                + " in the response from Azure Arc Managed Identity Endpoint", null));
        }

        try {
            // Second request: authenticate with the secret read from disk.
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey));
            connection.setRequestProperty("Metadata", "true");
            connection.connect();

            Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                .useDelimiter("\\A");
            String result = scanner.hasNext() ? scanner.next() : "";
            return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}

/**
 * Asynchronously acquire a token by exchanging the configured client assertion at the
 * tenant's token endpoint (client_credentials grant with a JWT bearer assertion).
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) {
    return clientAssertionAccessor.getValue()
        .flatMap(assertionToken -> Mono.fromCallable(() -> {
            // POST the client assertion (JWT) to the tenant's v2.0 token endpoint
            // using the client_credentials grant.
            String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("")
                + "/" + tenantId + "/oauth2/v2.0/token";
            StringBuilder urlParametersBuilder = new StringBuilder();
            urlParametersBuilder.append("client_assertion=");
            urlParametersBuilder.append(assertionToken);
            urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type"
                + ":jwt-bearer");
            urlParametersBuilder.append("&client_id=");
            urlParametersBuilder.append(clientId);
            urlParametersBuilder.append("&grant_type=client_credentials");
            urlParametersBuilder.append("&scope=");
            urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0),
                StandardCharsets.UTF_8.name()));
            String urlParams = urlParametersBuilder.toString();
            byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8);
            int postDataLength = postData.length;
            HttpURLConnection connection = null;
            URL url = new URL(authorityUrl);
            try {
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("POST");
                connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
                connection.setRequestProperty("Content-Length", Integer.toString(postDataLength));
                connection.setDoOutput(true);
                try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) {
                    outputStream.write(postData);
                }
                connection.connect();

                Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A");
                String result = s.hasNext() ? s.next() : "";
                return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }));
}

/**
 * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param identityHeader the identity header to acquire token with
 * @param thumbprint the certificate thumbprint used to validate the endpoint's TLS certificate
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint,
                                                                            String identityHeader,
                                                                            String thumbprint,
                                                                            TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        HttpsURLConnection connection = null;
        String endpoint = identityEndpoint;
        String headerValue = identityHeader;
        String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION;

        String resource = ScopeUtil.scopesToResource(request.getScopes());
        StringBuilder payload = new StringBuilder();

        payload.append("resource=");
        payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name()));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name()));
        if (clientId != null) {
            // User-assigned identity selected by client id.
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()));
        }
        if (resourceId != null) {
            // User-assigned identity selected by ARM resource id.
            payload.append("&mi_res_id=");
            payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name()));
        }
        try {
            URL url = new URL(String.format("%s?%s", endpoint, payload));
            connection = (HttpsURLConnection) url.openConnection();
            // Service Fabric serves a self-signed certificate; trust is pinned to the
            // provided thumbprint rather than the default trust store.
            IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER);
            connection.setRequestMethod("GET");
            if (headerValue != null) {
                connection.setRequestProperty("Secret", headerValue);
            }
            connection.setRequestProperty("Metadata", "true");
            connection.connect();

            Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
            String result = s.hasNext() ? s.next() : "";
            return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}

/**
 * Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param identityHeader the identity header to acquire token with
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader,
                                                               TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        String endpoint;
        String headerValue;
        String endpointVersion;

        endpoint = identityEndpoint;
        headerValue = identityHeader;
        endpointVersion = IDENTITY_ENDPOINT_VERSION;

        String resource = ScopeUtil.scopesToResource(request.getScopes());
        HttpURLConnection connection = null;
        StringBuilder payload = new StringBuilder();

        payload.append("resource=");
        payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name()));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name()));
        if (clientId != null) {
            // User-assigned identity selected by client id.
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()));
        }
        if (resourceId != null) {
            // User-assigned identity selected by ARM resource id.
            payload.append("&mi_res_id=");
            payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name()));
        }
        try {
            URL url = new URL(String.format("%s?%s", endpoint, payload));
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            if (headerValue != null) {
                // The 2019-08-01 endpoint takes X-IDENTITY-HEADER; older ones take Secret.
                if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) {
                    connection.setRequestProperty("X-IDENTITY-HEADER", headerValue);
                } else {
                    connection.setRequestProperty("Secret", headerValue);
                }
            }
            connection.setRequestProperty("Metadata", "true");
            connection.connect();

            Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                .useDelimiter("\\A");
            String result = s.hasNext() ? s.next() : "";
            return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}

/**
 * Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
    String resource = ScopeUtil.scopesToResource(request.getScopes());
    StringBuilder payload = new StringBuilder();
    // 410 responses indicate an IMDS upgrade in progress; wait at least this long.
    final int imdsUpgradeTimeInMs = 70 * 1000;

    try {
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name()));
        payload.append("&resource=");
        payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name()));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()));
        }
        if (resourceId != null) {
            payload.append("&mi_res_id=");
            payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name()));
        }
    } catch (IOException exception) {
        return Mono.error(exception);
    }

    String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("")
        + IdentityConstants.DEFAULT_IMDS_TOKENPATH;

    return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> {
        int retry = 1;
        while (retry <= options.getMaxRetry()) {
            URL url = null;
            HttpURLConnection connection = null;
            try {
                url = new URL(String.format("%s?%s", endpoint, payload));

                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                connection.setRequestProperty("Metadata", "true");
                connection.connect();

                Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A");
                String result = s.hasNext() ?
s.next() : "";
                return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
            } catch (IOException exception) {
                if (connection == null) {
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        String.format("Could not connect to the url: %s.", url), exception));
                }
                int responseCode;
                try {
                    responseCode = connection.getResponseCode();
                } catch (Exception e) {
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established, "
                                + e.getMessage() + ".", e));
                }
                if (responseCode == 400) {
                    // 400 from IMDS: no identity is assigned to this resource; retrying will not help.
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established.", null));
                }
                if (responseCode == 410
                    || responseCode == 429
                    || responseCode == 404
                    || (responseCode >= 500 && responseCode <= 599)) {
                    // Transient failure: back off and retry up to options.getMaxRetry() times.
                    // NOTE(review): getNano() / 1000 yields microseconds, not milliseconds —
                    // verify the intended unit for the sleep below.
                    int retryTimeoutInMs = options.getRetryTimeout()
                        .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
                    // Ensure that we wait at least imdsUpgradeTimeInMs when IMDS answers 410 (upgrading).
                    retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs)
                        ? imdsUpgradeTimeInMs : retryTimeoutInMs;
                    retry++;
                    if (retry > options.getMaxRetry()) {
                        break;
                    } else {
                        sleep(retryTimeoutInMs);
                    }
                } else {
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        "Couldn't acquire access token from IMDS, verify your objectId, "
                            + "clientId or msiResourceId", exception));
                }
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }
        throw LOGGER.logExceptionAsError(new RuntimeException(
            String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry())));
    }));
}

// Probes the IMDS endpoint with a short (500 ms) connect timeout to decide whether
// managed identity is reachable before entering the retry loop above.
private Mono<Boolean> checkIMDSAvailable(String endpoint) {
    StringBuilder payload = new StringBuilder();

    try {
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name()));
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    return Mono.fromCallable(() -> {
        HttpURLConnection connection = null;
        URL url = new URL(String.format("%s?%s", endpoint, payload));

        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            connection.setConnectTimeout(500);
            connection.connect();
        } catch (Exception e) {
            throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                new CredentialUnavailableException(
                    "ManagedIdentityCredential authentication unavailable. "
                        + "Connection to IMDS endpoint cannot be established, "
                        + e.getMessage() + ".", e));
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
        return true;
    });
}

// Sleeps for the given backoff; interruption is surfaced as IllegalStateException.
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        throw new IllegalStateException(ex);
    }
}

// Maps ProxyOptions onto a java.net.Proxy: SOCKS4/SOCKS5 -> SOCKS, anything else -> HTTP.
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
    switch (options.getType()) {
        case SOCKS4:
        case SOCKS5:
            return new Proxy(Type.SOCKS, options.getAddress());
        case HTTP:
        default:
            return new Proxy(Type.HTTP, options.getAddress());
    }
}

// Returns a directory that is safe to launch CLI processes from, or null when the
// Windows system root cannot be determined.
private String getSafeWorkingDirectory() {
    if (isWindowsPlatform()) {
        if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) {
            return null;
        }
        return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
    } else {
        return DEFAULT_MAC_LINUX_PATH;
    }
}

private boolean isWindowsPlatform() {
    return System.getProperty("os.name").contains("Windows");
}

// Masks access-token values in the given text before it is logged.
private String redactInfo(String input) {
    return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****");
}

// Opens the given URL in the platform's default browser; logs an error when no
// launcher is known for the current OS.
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();

    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    if (os.contains("win")) {
        rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
    } else if (os.contains("mac")) {
        rt.exec("open " + url);
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec("xdg-open " + url);
    } else {
        LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}

// Wraps an exception into an already-failed CompletableFuture for MSAL-style returns.
private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) {
    CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>();
    completableFuture.completeExceptionally(e);
    return completableFuture;
}

// Chooses the HTTP transport handed to MSAL: an explicit pipeline wins, then an
// explicit HttpClient, then a default client when no proxy options are configured.
private void initializeHttpPipelineAdapter() {
    HttpPipeline httpPipeline = options.getHttpPipeline();
    if (httpPipeline != null) {
        httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
    } else {
        HttpClient httpClient = options.getHttpClient();
        if (httpClient != null) {
            httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
        } else if (options.getProxyOptions() == null) {
            httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
        }
        // NOTE(review): when only proxy options are configured no adapter is created here;
        // callers fall back to the MSAL builder's proxy(...) path instead.
    }
}

/**
 * Get the configured tenant id.
 *
 * @return the tenant id.
 */
public String getTenantId() {
    return tenantId;
}

/**
 * Get the configured client id.
 *
 * @return the client id.
 */
public String getClientId() {
    return clientId;
}

/**
 * Get the configured identity client options.
 *
 * @return the client options.
 */
public IdentityClientOptions getIdentityClientOptions() {
    return options;
}

// True when the configured tenant is the special ADFS pseudo-tenant.
private boolean isADFSTenant() {
    return this.tenantId.equals(ADFS_TENANT);
}

// Reads the client certificate bytes from the configured path, then from the raw
// stream, returning an empty array when neither is set.
private byte[] getCertificateBytes() throws IOException {
    if (certificatePath != null) {
        return Files.readAllBytes(Paths.get(certificatePath));
    } else if (certificate != null) {
        ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
        byte[] buffer = new byte[1024];
        int read = certificate.read(buffer, 0, buffer.length);
        while (read != -1) {
            outputStream.write(buffer, 0, read);
            read = certificate.read(buffer, 0, buffer.length);
        }
        return outputStream.toByteArray();
    } else {
        return new byte[0];
    }
}

// Streams the client certificate from the configured path, falling back to the raw stream.
private InputStream getCertificateInputStream() throws IOException {
    if (certificatePath != null) {
        return new BufferedInputStream(new FileInputStream(certificatePath));
    } else {
        return certificate;
    }
}
}
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final 
String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
     */
    IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath,
                   String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
                   InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
                   Duration clientAssertionTimeout, IdentityClientOptions options) {
        // Default to the "organizations" multi-tenant authority when no tenant is given.
        if (tenantId == null) {
            tenantId = "organizations";
        }
        if (options == null) {
            options = new IdentityClientOptions();
        }
        this.tenantId = tenantId;
        this.clientId = clientId;
        this.resourceId = resourceId;
        this.clientSecret = clientSecret;
        this.clientAssertionFilePath = clientAssertionFilePath;
        this.certificatePath = certificatePath;
        this.certificate = certificate;
        this.certificatePassword = certificatePassword;
        this.clientAssertionSupplier = clientAssertionSupplier;
        this.options = options;
        // MSAL application objects are created lazily and shared across calls.
        this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() ->
            getPublicClientApplication(isSharedTokenCacheCredential));
        this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() ->
            getConfidentialClientApplication());
        // The parsed client assertion is cached and refreshed on the configured
        // timeout (default 5 minutes).
        this.clientAssertionAccessor = clientAssertionTimeout == null
            ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
            : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
    }

    // Reads the client assertion (a JWT) from the configured file path.
    private Mono<String> parseClientAssertion() {
        return Mono.fromCallable(() -> {
            if (clientAssertionFilePath != null) {
                byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath));
                return new String(encoded, StandardCharsets.UTF_8);
            } else {
                throw LOGGER.logExceptionAsError(new IllegalStateException(
                    "Client Assertion File Path is not provided."
                        + " It should be provided to authenticate with client assertion."
                ));
            }
        });
    }

    // Builds the MSAL PublicClientApplication, wiring authority, transport/proxy,
    // executor, client capabilities and (optionally) the persistent token cache.
    private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) {
        return Mono.defer(() -> {
            if (clientId == null) {
                throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                    "A non-null value for client ID must be provided for user authentication."));
            }
            String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("")
                + "/" + tenantId;
            PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
            try {
                publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl)
                    .validateAuthority(options.getAuthorityValidation());
            } catch (MalformedURLException e) {
                throw LOGGER.logExceptionAsWarning(new IllegalStateException(e));
            }

            initializeHttpPipelineAdapter();
            if (httpPipelineAdapter != null) {
                publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
            } else {
                publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            }

            if (options.getExecutorService() != null) {
                publicClientApplicationBuilder.executorService(options.getExecutorService());
            }

            if (!options.isCp1Disabled()) {
                // Advertise the CP1 capability so AAD may issue claims challenges (CAE support).
                Set<String> set = new HashSet<>(1);
                set.add("CP1");
                publicClientApplicationBuilder.clientCapabilities(set);
            }
            return Mono.just(publicClientApplicationBuilder);
        }).flatMap(builder -> {
            TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions();
            PersistentTokenCacheImpl tokenCache = null;
            if (tokenCachePersistenceOptions != null) {
                try {
                    tokenCache = new PersistentTokenCacheImpl()
                        .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed())
                        .setName(tokenCachePersistenceOptions.getName());
                    builder.setTokenCacheAccessAspect(tokenCache);
                } catch (Throwable t) {
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
                        "Shared token cache is unavailable in this environment.", null, t));
                }
            }
            PublicClientApplication publicClientApplication =
builder.build();
            // Registration of the persistent cache is itself async; complete it before
            // handing the application out.
            return tokenCache != null ? tokenCache.registerCache()
                .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication);
        });
    }

    // Authenticates using the session persisted by the Azure Tools for IntelliJ plugin.
    // Supports the plugin's service-principal ("SP") and device-code ("DC") sessions.
    public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) {
        try {
            IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath());
            IntelliJAuthMethodDetails authDetails;
            try {
                authDetails = cacheAccessor.getAuthDetailsIfAvailable();
            } catch (CredentialUnavailableException e) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJ Authentication not available.", e)));
            }
            if (authDetails == null) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJ Authentication not available."
                        + " Please log in with Azure Tools for IntelliJ plugin in the IDE.")));
            }
            String authType = authDetails.getAuthMethod();
            if ("SP".equalsIgnoreCase(authType)) {
                // Service-principal details were cached by the plugin; build a confidential
                // client from them and acquire with the client_credentials grant.
                Map<String, String> spDetails = cacheAccessor
                    .getIntellijServicePrincipalDetails(authDetails.getCredFilePath());
                String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant");
                try {
                    ConfidentialClientApplication.Builder applicationBuilder =
                        ConfidentialClientApplication.builder(spDetails.get("client"),
                            ClientCredentialFactory.createFromSecret(spDetails.get("key")))
                            .authority(authorityUrl).validateAuthority(options.getAuthorityValidation());

                    if (httpPipelineAdapter != null) {
                        applicationBuilder.httpClient(httpPipelineAdapter);
                    } else if (options.getProxyOptions() != null) {
                        applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
                    }

                    if (options.getExecutorService() != null) {
                        applicationBuilder.executorService(options.getExecutorService());
                    }

                    ConfidentialClientApplication application = applicationBuilder.build();
                    return Mono.fromFuture(application.acquireToken(
                        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                            .build())).map(MsalToken::new);
                } catch (MalformedURLException e) {
                    return Mono.error(e);
                }
            } else if ("DC".equalsIgnoreCase(authType)) {
                // Device-code session: redeem the refresh token the plugin cached.
                LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools"
                    + " for IntelliJ Plugin.");
                if (isADFSTenant()) {
                    LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and"
                        + " the ADFS tenants are not supported via IntelliJ Authentication currently.");
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException("IntelliJCredential "
                            + "authentication unavailable. ADFS tenant/authorities are not supported.")));
                }
                try {
                    JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials();
                    String refreshToken = intelliJCredentials.get("refreshToken").textValue();

                    RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder =
                        RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken);

                    if (request.getClaims() != null) {
                        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
                        refreshTokenParametersBuilder.claims(customClaimRequest);
                    }

                    return publicClientApplicationAccessor.getValue()
                        .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build()))
                            .map(MsalToken::new));
                } catch (CredentialUnavailableException e) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e));
                }
            } else {
                LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication"
                    + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one"
                    + " of those schemes from Azure Tools for IntelliJ plugin.");

                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJ Authentication not available."
                        + " Please login with Azure Tools for IntelliJ plugin in the IDE.")));
            }
        } catch (IOException e) {
            return Mono.error(e);
        }
    }

    /**
     * Asynchronously acquire a token from Active Directory with Azure CLI.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
        StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");

        String scopes = ScopeUtil.scopesToResource(request.getScopes());

        try {
            ScopeUtil.validateScope(scopes);
        } catch (IllegalArgumentException ex) {
            return Mono.error(LOGGER.logExceptionAsError(ex));
        }
        azCommand.append(scopes);

        String tenant = IdentityUtil.resolveTenantId(null, request, options);
        if (!CoreUtils.isNullOrEmpty(tenant)) {
            // NOTE(review): no space is appended between the resource and "--tenant",
            // producing "...<resource>--tenant <id>" — likely needs a leading " --tenant ".
            azCommand.append("--tenant ").append(tenant);
        }

        AccessToken token;
        try {
            String starter;
            String switcher;
            if (isWindowsPlatform()) {
                starter = WINDOWS_STARTER;
                switcher = WINDOWS_SWITCHER;
            } else {
                starter = LINUX_MAC_STARTER;
                switcher = LINUX_MAC_SWITCHER;
            }

            ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString());

            String workingDirectory = getSafeWorkingDirectory();
            if (workingDirectory != null) {
                builder.directory(new File(workingDirectory));
            } else {
                throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. 
To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. 
To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? 
new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. 
It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
*
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) {
    // Client-credentials flow: the confidential client authenticates as itself for the requested scopes.
    return confidentialClientApplicationAccessor.getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> {
            ClientCredentialParameters.ClientCredentialParametersBuilder credentialParameters =
                ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                    .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
            return confidentialClient.acquireToken(credentialParameters.build());
        })).map(MsalToken::new);
}

/**
 * Builds the HTTP pipeline used for token requests when no custom pipeline was supplied:
 * retry policy sandwiched by the standard before/after-retry policies, plus request logging.
 */
private HttpPipeline setupPipeline(HttpClient httpClient) {
    List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();
    HttpLogOptions logOptions = new HttpLogOptions();
    HttpPolicyProviders.addBeforeRetryPolicies(pipelinePolicies);
    pipelinePolicies.add(new RetryPolicy());
    HttpPolicyProviders.addAfterRetryPolicies(pipelinePolicies);
    pipelinePolicies.add(new HttpLoggingPolicy(logOptions));
    HttpPipelinePolicy[] policyArray = pipelinePolicies.toArray(new HttpPipelinePolicy[0]);
    return new HttpPipelineBuilder()
        .httpClient(httpClient)
        .policies(policyArray)
        .build();
}
/**
 * Asynchronously acquire a token from Active Directory with a username and a password.
* * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
*
 * @param request the details of the token request
 * @param account the account used to log in to acquire the last token
 * @return a Publisher that emits an AccessToken
 */
@SuppressWarnings("deprecation")
public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
    return publicClientApplicationAccessor.getValue()
        .flatMap(pc -> Mono.fromFuture(() -> {
            // First attempt: silent acquisition from the MSAL cache.
            SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
                new HashSet<>(request.getScopes()));
            if (request.getClaims() != null) {
                ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
                parametersBuilder.claims(customClaimRequest);
                // When claims are present the cached token may not satisfy them, so refresh eagerly.
                parametersBuilder.forceRefresh(true);
            }
            if (account != null) {
                parametersBuilder = parametersBuilder.account(account);
            }
            parametersBuilder.tenant(
                IdentityUtil.resolveTenantId(tenantId, request, options));
            try {
                return pc.acquireTokenSilently(parametersBuilder.build());
            } catch (MalformedURLException e) {
                return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
            }
        }).map(MsalToken::new)
            // Only accept the cached token if it is not within the proactive-refresh window.
            .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))
            // Second attempt: token was missing or near expiry — force a refresh against AAD.
            .switchIfEmpty(Mono.fromFuture(() -> {
                SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
                    new HashSet<>(request.getScopes())).forceRefresh(true);
                if (request.getClaims() != null) {
                    ClaimsRequest customClaimRequest = CustomClaimRequest
                        .formatAsClaimsRequest(request.getClaims());
                    forceParametersBuilder.claims(customClaimRequest);
                }
                if (account != null) {
                    forceParametersBuilder = forceParametersBuilder.account(account);
                }
                forceParametersBuilder.tenant(
                    IdentityUtil.resolveTenantId(tenantId, request, options));
                try {
                    return pc.acquireTokenSilently(forceParametersBuilder.build());
                } catch (MalformedURLException e) {
                    return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
                }
            }).map(MsalToken::new)));
}
/**
 *
Asynchronously acquire a token from the currently logged in client.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
@SuppressWarnings("deprecation")
public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) {
    // Silent-only lookup against the confidential client's cache; emits empty when the cached
    // token is absent or already inside the proactive-refresh window.
    return confidentialClientApplicationAccessor.getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> {
            SilentParameters.SilentParametersBuilder silentParameters = SilentParameters.builder(
                    new HashSet<>(request.getScopes()))
                .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
            try {
                return confidentialClient.acquireTokenSilently(silentParameters.build());
            } catch (MalformedURLException e) {
                return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
            }
        }).map(result -> (AccessToken) new MsalToken(result))
            .filter(token -> OffsetDateTime.now().isBefore(token.getExpiresAt().minus(REFRESH_OFFSET))));
}

/**
 * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
 * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
 * different device.
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. 
" + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. 
Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc 
Managed Identity Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) 
.useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = 
new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } } }
Remove a string concatenation by using two chained StringBuilder.append calls instead.
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." 
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? 
LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); }
azCommand.append("--tenant ").append(tenant);
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." 
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? 
LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final 
String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? 
new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? 
tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." )); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); 
PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." 
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. 
ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". 
To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new 
CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. 
* * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * 
Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. 
" + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
 * @param request the details of the token request
 * @param port the port on which the HTTP server is listening
 * @param redirectUrl the redirect URL to listen on and receive security code
 * @param loginHint the username suggestion to pre-fill the login page's username/email address field
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
                                                          String redirectUrl, String loginHint) {
    URI redirectUri;
    String redirect;
    // Redirect resolution precedence: explicit port > explicit redirect URL > bare localhost.
    if (port != null) {
        redirect = HTTP_LOCALHOST + ":" + port;
    } else if (redirectUrl != null) {
        redirect = redirectUrl;
    } else {
        redirect = HTTP_LOCALHOST;
    }
    try {
        redirectUri = new URI(redirect);
    } catch (URISyntaxException e) {
        return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e)));
    }
    InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
        InteractiveRequestParameters.builder(redirectUri)
            .scopes(new HashSet<>(request.getScopes()))
            .prompt(Prompt.SELECT_ACCOUNT)
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    if (request.getClaims() != null) {
        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
        builder.claims(customClaimRequest);
    }
    if (loginHint != null) {
        builder.loginHint(loginHint);
    }
    Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue()
        .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build())));
    return acquireToken.onErrorMap(t -> new ClientAuthenticationException(
        "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new);
}

/**
 * Gets token from shared token cache
 * */
public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) {
    // Find the uniquely matching cached account (optionally filtered by username), then
    // delegate to the silent public-client flow.
    return publicClientApplicationAccessor.getValue()
        .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts())
            .onErrorMap(t -> new CredentialUnavailableException(
                "Cannot get accounts from token cache. Error: " + t.getMessage(), t))
            .flatMap(set -> {
                IAccount requestedAccount;
                // Deduplicate by home account id; the same account may appear multiple times.
                Map<String, IAccount> accounts = new HashMap<>();
                if (set.isEmpty()) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException("SharedTokenCacheCredential "
                            + "authentication unavailable. No accounts were found in the cache.")));
                }
                for (IAccount cached : set) {
                    if (username == null || username.equals(cached.username())) {
                        if (!accounts.containsKey(cached.homeAccountId())) {
                            accounts.put(cached.homeAccountId(), cached);
                        }
                    }
                }
                // NOTE(review): the branches below raise plain RuntimeException while the empty-cache
                // branch raises CredentialUnavailableException — looks inconsistent; confirm intent.
                if (accounts.isEmpty()) {
                    return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                        + "authentication unavailable. No account matching the specified username: %s was "
                        + "found in the cache.", username)));
                } else if (accounts.size() > 1) {
                    if (username == null) {
                        return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication "
                            + "unavailable. Multiple accounts were found in the cache. Use username and "
                            + "tenant id to disambiguate."));
                    } else {
                        return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                            + "authentication unavailable. Multiple accounts matching the specified username: "
                            + "%s were found in the cache.", username)));
                    }
                } else {
                    requestedAccount = accounts.values().iterator().next();
                }
                return authenticateWithPublicClientCache(request, requestedAccount);
            }));
}

/**
 * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint.
* * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc 
Managed Identity Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) 
.useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = 
new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } } }
// The identity client that contains APIs to retrieve access tokens from various
// configurations (service principal, user credentials, managed identity, developer tools).
// NOTE(review): this copy of the file was mangled by comment-stripping — several string
// literals that contained URLs (aka.ms troubleshooting links, http://localhost) are
// truncated at the "//" and have lost their closing quotes; those fragments are preserved
// exactly as found and marked below. Restore them from the upstream source.
class IdentityClient {
    private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
    private static final Random RANDOM = new Random();
    // Shell launchers used when spawning az / PowerShell processes.
    private static final String WINDOWS_STARTER = "cmd.exe";
    private static final String LINUX_MAC_STARTER = "/bin/sh";
    private static final String WINDOWS_SWITCHER = "/c";
    private static final String LINUX_MAC_SWITCHER = "-c";
    private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized";
    private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found");
    private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot");
    private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe";
    private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe";
    private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh";
    private static final String DEFAULT_MAC_LINUX_PATH = "/bin/";
    // Cached tokens expiring within this window are proactively refreshed.
    private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5);
    private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01";
    private static final String MSI_ENDPOINT_VERSION = "2017-09-01";
    private static final String ADFS_TENANT = "adfs";
    // NOTE(review): literal truncated by comment-stripping — original was "http://localhost".
    private static final String HTTP_LOCALHOST = "http:
    private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview";
    private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class);
    private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)");
    private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$");

    private final IdentityClientOptions options;
    private final String tenantId;
    private final String clientId;
    private final String resourceId;
    private final String clientSecret;
    private final String clientAssertionFilePath;
    private final InputStream certificate;
    private final String certificatePath;
    private final Supplier<String> clientAssertionSupplier;
    private final String certificatePassword;
    private HttpPipelineAdapter httpPipelineAdapter;
    // Lazily-built, thread-safe accessors for the MSAL application objects.
    private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor;
    private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
    private final SynchronizedAccessor<String> clientAssertionAccessor;

    /**
     * Creates an IdentityClient with the given options.
     *
     * @param tenantId the tenant ID of the application.
     * @param clientId the client ID of the application.
     * @param clientSecret the client secret of the application.
     * @param certificatePath the path to the PKCS12 or PEM certificate of the application.
     * @param clientAssertionFilePath the path to the client-assertion file.
     * @param resourceId the resource ID of the application
     * @param clientAssertionSupplier supplier producing a client assertion on demand.
     * @param certificate the PKCS12 or PEM certificate of the application.
     * @param certificatePassword the password protecting the PFX certificate.
     * @param isSharedTokenCacheCredential Indicate whether the credential is
     * {@link com.azure.identity.SharedTokenCacheCredential} or not.
     * @param clientAssertionTimeout the timeout to use for the client assertion.
     * @param options the options configuring the client.
     */
    IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath,
        String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
        InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
        Duration clientAssertionTimeout, IdentityClientOptions options) {
        // Default to the multi-tenant "organizations" authority when no tenant is given.
        if (tenantId == null) {
            tenantId = "organizations";
        }
        if (options == null) {
            options = new IdentityClientOptions();
        }
        this.tenantId = tenantId;
        this.clientId = clientId;
        this.resourceId = resourceId;
        this.clientSecret = clientSecret;
        this.clientAssertionFilePath = clientAssertionFilePath;
        this.certificatePath = certificatePath;
        this.certificate = certificate;
        this.certificatePassword = certificatePassword;
        this.clientAssertionSupplier = clientAssertionSupplier;
        this.options = options;
        this.publicClientApplicationAccessor = new SynchronizedAccessor<>(()
            -> getPublicClientApplication(isSharedTokenCacheCredential));
        this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(()
            -> getConfidentialClientApplication());
        // Client assertions are cached and re-read on the supplied timeout (default 5 minutes).
        this.clientAssertionAccessor = clientAssertionTimeout == null
            ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
            : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
    }

    // Builds (lazily, per subscription) the MSAL ConfidentialClientApplication from the
    // configured secret, certificate (PEM when no password; PFX otherwise), or
    // client-assertion supplier, wiring HTTP pipeline/proxy, executor, persistent
    // token cache and regional authority as configured.
    private Mono<ConfidentialClientApplication> getConfidentialClientApplication() {
        return Mono.defer(() -> {
            if (clientId == null) {
                return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException(
                    "A non-null value for client ID must be provided for user authentication.")));
            }
            String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("")
                + "/" + tenantId;
            IClientCredential credential;
            if (clientSecret != null) {
                credential = ClientCredentialFactory.createFromSecret(clientSecret);
            } else if (certificate != null || certificatePath != null) {
                try {
                    // No password => PEM: public cert(s) and private key parsed from the same bytes.
                    if (certificatePassword == null) {
                        byte[] pemCertificateBytes = getCertificateBytes();
                        List<X509Certificate> x509CertificateList =
                            CertificateUtil.publicKeyFromPem(pemCertificateBytes);
                        PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes);
                        if (x509CertificateList.size() == 1) {
                            credential = ClientCredentialFactory.createFromCertificate(
                                privateKey, x509CertificateList.get(0));
                        } else {
                            credential = ClientCredentialFactory.createFromCertificateChain(
                                privateKey, x509CertificateList);
                        }
                    } else {
                        // Password given => PFX/PKCS12 stream.
                        try (InputStream pfxCertificateStream = getCertificateInputStream()) {
                            credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream,
                                certificatePassword);
                        }
                    }
                } catch (IOException | GeneralSecurityException e) {
                    return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(
                        "Failed to parse the certificate for the credential: " + e.getMessage(), e)));
                }
            } else if (clientAssertionSupplier != null) {
                credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get());
            } else {
                // NOTE(review): the rest of this statement (troubleshooting URL, closing quote
                // and ")));" tokens) was lost to comment-stripping; fragment preserved as found.
                return Mono.error(LOGGER.logExceptionAsError(
                    new IllegalArgumentException("Must provide client secret or client certificate path."
                        + " To mitigate this issue, please refer to the troubleshooting guidelines here at "
                        + "https:
            }
            ConfidentialClientApplication.Builder applicationBuilder =
                ConfidentialClientApplication.builder(clientId, credential);
            try {
                applicationBuilder = applicationBuilder.authority(authorityUrl)
                    .validateAuthority(options.getAuthorityValidation());
            } catch (MalformedURLException e) {
                return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e)));
            }
            applicationBuilder.sendX5c(options.isIncludeX5c());
            initializeHttpPipelineAdapter();
            if (httpPipelineAdapter != null) {
                applicationBuilder.httpClient(httpPipelineAdapter);
            } else {
                applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            }
            if (options.getExecutorService() != null) {
                applicationBuilder.executorService(options.getExecutorService());
            }
            TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions();
            PersistentTokenCacheImpl tokenCache = null;
            if (tokenCachePersistenceOptions != null) {
                try {
                    tokenCache = new PersistentTokenCacheImpl()
                        .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed())
                        .setName(tokenCachePersistenceOptions.getName());
                    applicationBuilder.setTokenCacheAccessAspect(tokenCache);
                } catch (Throwable t) {
                    return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException(
                        "Shared token cache is unavailable in this environment.", null, t)));
                }
            }
            if (options.getRegionalAuthority() != null) {
                if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) {
                    applicationBuilder.autoDetectRegion(true);
                } else {
                    applicationBuilder.azureRegion(options.getRegionalAuthority().toString());
                }
            }
            ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build();
            // Register the persistent cache with the application before first use.
            return tokenCache != null
                ? tokenCache.registerCache()
                    .map(ignored -> confidentialClientApplication)
                : Mono.just(confidentialClientApplication);
        });
    }

    // Reads the client assertion from the configured file path (UTF-8); errors when unset.
    private Mono<String> parseClientAssertion() {
        return Mono.fromCallable(() -> {
            if (clientAssertionFilePath != null) {
                byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath));
                return new String(encoded, StandardCharsets.UTF_8);
            } else {
                throw LOGGER.logExceptionAsError(new IllegalStateException(
                    "Client Assertion File Path is not provided."
                        + " It should be provided to authenticate with client assertion."
                ));
            }
        });
    }

    // Builds the MSAL PublicClientApplication, advertising the CP1 capability unless
    // disabled and optionally wiring the persistent shared token cache.
    // NOTE(review): the sharedTokenCacheCredential parameter is not read in this
    // visible body — confirm whether it is still needed.
    private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) {
        return Mono.defer(() -> {
            if (clientId == null) {
                throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                    "A non-null value for client ID must be provided for user authentication."));
            }
            String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("")
                + "/" + tenantId;
            PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
            try {
                publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl)
                    .validateAuthority(options.getAuthorityValidation());
            } catch (MalformedURLException e) {
                throw LOGGER.logExceptionAsWarning(new IllegalStateException(e));
            }
            initializeHttpPipelineAdapter();
            if (httpPipelineAdapter != null) {
                publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
            } else {
                publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            }
            if (options.getExecutorService() != null) {
                publicClientApplicationBuilder.executorService(options.getExecutorService());
            }
            // Advertise the CP1 client capability unless explicitly disabled.
            if (!options.isCp1Disabled()) {
                Set<String> set = new HashSet<>(1);
                set.add("CP1");
                publicClientApplicationBuilder.clientCapabilities(set);
            }
            return Mono.just(publicClientApplicationBuilder);
        }).flatMap(builder -> {
            TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions();
            PersistentTokenCacheImpl tokenCache = null;
            if (tokenCachePersistenceOptions != null) {
                try {
                    tokenCache = new PersistentTokenCacheImpl()
                        .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed())
                        .setName(tokenCachePersistenceOptions.getName());
                    builder.setTokenCacheAccessAspect(tokenCache);
                } catch (Throwable t) {
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
                        "Shared token cache is unavailable in this environment.", null, t));
                }
            }
            PublicClientApplication publicClientApplication = builder.build();
            return tokenCache != null
                ? tokenCache.registerCache()
                    .map(ignored -> publicClientApplication)
                : Mono.just(publicClientApplication);
        });
    }

    /**
     * Asynchronously acquire a token using the authentication details cached by the
     * Azure Tools for IntelliJ plugin: a service-principal ("SP") secret flow or a
     * device-code ("DC") refresh-token flow.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) {
        try {
            IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath());
            IntelliJAuthMethodDetails authDetails;
            try {
                authDetails = cacheAccessor.getAuthDetailsIfAvailable();
            } catch (CredentialUnavailableException e) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJ Authentication not available.", e)));
            }
            if (authDetails == null) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJ Authentication not available."
                        + " Please log in with Azure Tools for IntelliJ plugin in the IDE.")));
            }
            String authType = authDetails.getAuthMethod();
            if ("SP".equalsIgnoreCase(authType)) {
                // Service-principal flow: build a confidential client from the cached SP details.
                Map<String, String> spDetails = cacheAccessor
                    .getIntellijServicePrincipalDetails(authDetails.getCredFilePath());
                String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant");
                try {
                    ConfidentialClientApplication.Builder applicationBuilder =
                        ConfidentialClientApplication.builder(spDetails.get("client"),
                            ClientCredentialFactory.createFromSecret(spDetails.get("key")))
                            .authority(authorityUrl).validateAuthority(options.getAuthorityValidation());
                    if (httpPipelineAdapter != null) {
                        applicationBuilder.httpClient(httpPipelineAdapter);
                    } else if (options.getProxyOptions() != null) {
                        applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
                    }
                    if (options.getExecutorService() != null) {
                        applicationBuilder.executorService(options.getExecutorService());
                    }
                    ConfidentialClientApplication application = applicationBuilder.build();
                    return Mono.fromFuture(application.acquireToken(
                        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                            .build())).map(MsalToken::new);
                } catch (MalformedURLException e) {
                    return Mono.error(e);
                }
            } else if ("DC".equalsIgnoreCase(authType)) {
                // Device-code flow: redeem the cached refresh token.
                LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools"
                    + " for IntelliJ Plugin.");
                if (isADFSTenant()) {
                    LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and"
                        + " the ADFS tenants are not supported via IntelliJ Authentication currently.");
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException("IntelliJCredential "
                            + "authentication unavailable. ADFS tenant/authorities are not supported.")));
                }
                try {
                    JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials();
                    String refreshToken = intelliJCredentials.get("refreshToken").textValue();
                    RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder =
                        RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken);
                    if (request.getClaims() != null) {
                        ClaimsRequest customClaimRequest =
                            CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
                        refreshTokenParametersBuilder.claims(customClaimRequest);
                    }
                    return publicClientApplicationAccessor.getValue()
                        .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build()))
                            .map(MsalToken::new));
                } catch (CredentialUnavailableException e) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e));
                }
            } else {
                LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication"
                    + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one"
                    + " of those schemes from Azure Tools for IntelliJ plugin.");
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJ Authentication not available."
                        + " Please login with Azure Tools for IntelliJ plugin in the IDE.")));
            }
        } catch (IOException e) {
            return Mono.error(e);
        }
    }

    /**
     * Asynchronously acquire a token from Active Directory with Azure Power Shell.
     * Tries the default pwsh executable first, then (on Windows) the legacy
     * powershell.exe; "unavailable" failures from the attempts are merged into a
     * single chained CredentialUnavailableException.
     * NOTE(review): an orphaned javadoc for a removed Azure CLI method preceded this
     * one in the mangled copy; it has been dropped here.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) {
        List<CredentialUnavailableException> exceptions = new ArrayList<>(2);
        PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows()
            ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE);
        PowershellManager legacyPowerShellManager = Platform.isWindows()
            ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null;
        List<PowershellManager> powershellManagers = new ArrayList<>(2);
        powershellManagers.add(defaultPowerShellManager);
        if (legacyPowerShellManager != null) {
            powershellManagers.add(legacyPowerShellManager);
        }
        return Flux.fromIterable(powershellManagers)
            .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager)
                .onErrorResume(t -> {
                    // Anything other than "credential unavailable" is a hard failure.
                    // NOTE(review): troubleshooting URL truncated by comment-stripping;
                    // fragment preserved as found.
                    if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) {
                        return Mono.error(new ClientAuthenticationException(
                            "Azure Powershell authentication failed. Error Details: " + t.getMessage()
                            + ". To mitigate this issue, please refer to the troubleshooting guidelines here at "
                            + "https:
                            null, t));
                    }
                    exceptions.add((CredentialUnavailableException) t);
                    return Mono.empty();
                }), 1)
            .next()
            .switchIfEmpty(Mono.defer(() -> {
                // Fold all per-executable failures into one chained message.
                CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
                for (int z = exceptions.size() - 2; z >= 0; z--) {
                    CredentialUnavailableException current = exceptions.get(z);
                    last = new CredentialUnavailableException("Azure PowerShell authentication failed using default"
                        + "powershell(pwsh) with following error: " + current.getMessage()
                        + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)"
                        + " with following error: " + last.getMessage(),
                        last.getCause());
                }
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last)));
            }));
    }

    /**
     * Asynchronously acquire a token on behalf of the configured user assertion
     * (on-behalf-of flow). NOTE(review): the original javadoc mislabeled this
     * method as Azure PowerShell authentication; corrected here.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) {
        return confidentialClientApplicationAccessor.getValue()
            .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters
                    .builder(new HashSet<>(request.getScopes()), options.getUserAssertion())
                    .tenant(IdentityUtil.resolveTenantId(tenantId, request, options))
                    .build()))
                .map(MsalToken::new));
    }

    // Runs the Az.Accounts / Get-AzAccessToken flow inside the given PowerShell session
    // and parses the JSON result into an AccessToken; the session is always closed.
    private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request,
                                                           PowershellManager powershellManager) {
        return powershellManager.initSession()
            .flatMap(manager -> {
                String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru";
                return manager.runCommand(azAccountsCommand)
                    .flatMap(output -> {
                        if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded "
                            + "because no valid module file")) {
                            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                                new CredentialUnavailableException(
                                "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to"
                                    + " use Azure PowerShell Credential.")));
                        }
                        LOGGER.verbose("Az.accounts module was found installed.");
                        StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl ");
                        accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes()));
                        accessTokenCommand.append(" | ConvertTo-Json");
                        // NOTE(review): 'command' is never read below, and ClientLogger uses
                        // {} placeholders so the %s in the next message is never substituted
                        // — verify intent.
                        String command = accessTokenCommand.toString();
                        LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure "
                                + "Powershell to retrieve the Access Token.",
                            accessTokenCommand);
                        return manager.runCommand(accessTokenCommand.toString())
                            .flatMap(out -> {
                                if (out.contains("Run Connect-AzAccount to login")) {
                                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                                        new CredentialUnavailableException(
                                            "Run Connect-AzAccount to login to Azure account in PowerShell.")));
                                }
                                try {
                                    LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the "
                                        + "received response from Azure Powershell.");
                                    Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class,
                                        SerializerEncoding.JSON);
                                    String accessToken = objectMap.get("Token");
                                    String time = objectMap.get("ExpiresOn");
                                    // Normalize the expiry to UTC.
                                    OffsetDateTime expiresOn = OffsetDateTime.parse(time)
                                        .withOffsetSameInstant(ZoneOffset.UTC);
                                    return Mono.just(new AccessToken(accessToken, expiresOn));
                                } catch (IOException e) {
                                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                                        new CredentialUnavailableException(
                                            "Encountered error when deserializing response from Azure Power Shell.",
                                            e)));
                                }
                            });
                    });
            }).doFinally(ignored -> powershellManager.close());
    }

    /**
     * Asynchronously acquire a token from Active Directory with a client secret.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) {
        return confidentialClientApplicationAccessor.getValue()
            .flatMap(confidentialClient -> Mono.fromFuture(() -> {
                ClientCredentialParameters.ClientCredentialParametersBuilder builder =
                    ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                        .tenant(IdentityUtil
                            .resolveTenantId(tenantId, request, options));
                return confidentialClient.acquireToken(builder.build());
            }
        )).map(MsalToken::new);
    }

    // Builds a minimal azure-core HttpPipeline (retry + logging) around the given client.
    private HttpPipeline setupPipeline(HttpClient httpClient) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        HttpLogOptions httpLogOptions = new HttpLogOptions();
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(new RetryPolicy());
        HttpPolicyProviders.addAfterRetryPolicies(policies);
        policies.add(new HttpLoggingPolicy(httpLogOptions));
        return new HttpPipelineBuilder().httpClient(httpClient)
            .policies(policies.toArray(new HttpPipelinePolicy[0])).build();
    }

    /**
     * Asynchronously acquire a token from Active Directory with a username and a password.
     *
     * @param request the details of the token request
     * @param username the username of the user
     * @param password the password of the user
     * @return a Publisher that emits an AccessToken
     */
    public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
                                                            String username, String password) {
        return publicClientApplicationAccessor.getValue()
            .flatMap(pc -> Mono.fromFuture(() -> {
                UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder =
                    UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()),
                        username, password.toCharArray());
                if (request.getClaims() != null) {
                    ClaimsRequest customClaimRequest = CustomClaimRequest
                        .formatAsClaimsRequest(request.getClaims());
                    userNamePasswordParametersBuilder.claims(customClaimRequest);
                }
                userNamePasswordParametersBuilder.tenant(
                    IdentityUtil.resolveTenantId(tenantId, request, options));
                return pc.acquireToken(userNamePasswordParametersBuilder.build());
            }
            // NOTE(review): troubleshooting URL and closing quote lost to
            // comment-stripping; fragment preserved as found.
        )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and "
            + "password. To mitigate this issue, please refer to the troubleshooting guidelines "
            + "here at https:
            null, t)).map(MsalToken::new);
    }

    /**
     * Asynchronously acquire a token from the currently logged in client.
     *
     * @param request the details of the token request
     * @param account the account used to log in to acquire the last token
     * @return a Publisher that emits an AccessToken
     */
    @SuppressWarnings("deprecation")
    public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
        return publicClientApplicationAccessor.getValue()
            .flatMap(pc -> Mono.fromFuture(() -> {
                SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
                    new HashSet<>(request.getScopes()));
                if (request.getClaims() != null) {
                    ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
                    parametersBuilder.claims(customClaimRequest);
                    // A claims challenge always forces a refresh.
                    parametersBuilder.forceRefresh(true);
                }
                if (account != null) {
                    parametersBuilder = parametersBuilder.account(account);
                }
                parametersBuilder.tenant(
                    IdentityUtil.resolveTenantId(tenantId, request, options));
                try {
                    return pc.acquireTokenSilently(parametersBuilder.build());
                } catch (MalformedURLException e) {
                    return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
                }
            }).map(MsalToken::new)
                // Tokens inside the refresh window are re-acquired below with forceRefresh.
                .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))
                .switchIfEmpty(Mono.fromFuture(() -> {
                    SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
                        new HashSet<>(request.getScopes())).forceRefresh(true);
                    if (request.getClaims() != null) {
                        ClaimsRequest customClaimRequest = CustomClaimRequest
                            .formatAsClaimsRequest(request.getClaims());
                        forceParametersBuilder.claims(customClaimRequest);
                    }
                    if (account != null) {
                        forceParametersBuilder = forceParametersBuilder.account(account);
                    }
                    forceParametersBuilder.tenant(
                        IdentityUtil.resolveTenantId(tenantId, request, options));
                    try {
                        return pc.acquireTokenSilently(forceParametersBuilder.build());
                    } catch (MalformedURLException e) {
                        return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
                    }
                }).map(MsalToken::new)));
    }

    /**
     * Asynchronously acquire a token from the currently logged in client.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    @SuppressWarnings("deprecation")
    public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) {
        return confidentialClientApplicationAccessor.getValue()
            .flatMap(confidentialClient -> Mono.fromFuture(() -> {
                SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
                        new HashSet<>(request.getScopes()))
                    .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
                try {
                    return confidentialClient.acquireTokenSilently(parametersBuilder.build());
                } catch (MalformedURLException e) {
                    return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
                }
            }).map(ar -> (AccessToken) new MsalToken(ar))
                // Only cached tokens outside the refresh window are emitted; otherwise empty.
                .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))));
    }

    /**
     * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
     * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
     * different device.
     *
     * @param request the details of the token request
     * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
     * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
     * code expires
     */
    public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
                                                      Consumer<DeviceCodeInfo> deviceCodeConsumer) {
        return publicClientApplicationAccessor.getValue().flatMap(pc ->
            Mono.fromFuture(() -> {
                DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder =
                    DeviceCodeFlowParameters.builder(
                        new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(
                            new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(),
                                OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message())))
                        .tenant(IdentityUtil
                            .resolveTenantId(tenantId, request, options));
                if (request.getClaims() != null) {
                    ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
                    parametersBuilder.claims(customClaimRequest);
                }
                return pc.acquireToken(parametersBuilder.build());
            }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code",
                null, t))
                .map(MsalToken::new));
    }

    /**
     * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token.
     *
     * @param request the details of the token request
     * @param cloud the cloud name used to look up the cached VS Code credentials
     * @return a Publisher that emits an AccessToken.
     */
    public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) {
        if (isADFSTenant()) {
            // NOTE(review): troubleshooting URL, closing quote and ")));" tokens
            // lost to comment-stripping; fragment preserved as found.
            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                new CredentialUnavailableException("VsCodeCredential "
                    + "authentication unavailable. ADFS tenant/authorities are not supported. "
                    + "To mitigate this issue, please refer to the troubleshooting guidelines here at "
                    + "https:
        }
        VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor();
        String credential = null;
        try {
            credential = accessor.getCredentials("VS Code Azure", cloud);
        } catch (CredentialUnavailableException e) {
            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e));
        }
        RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters
            .builder(new HashSet<>(request.getScopes()), credential);
        if (request.getClaims() != null) {
            ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
            parametersBuilder.claims(customClaimRequest);
        }
        return publicClientApplicationAccessor.getValue()
            .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build()))
                .onErrorResume(t -> {
                    // Interaction-required maps to "unavailable" so fallback credentials can run.
                    // NOTE(review): same truncation as above; fragment preserved as found.
                    if (t instanceof MsalInteractionRequiredException) {
                        return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                            new CredentialUnavailableException("Failed to acquire token with"
                                + " VS code credential."
                                + " To mitigate this issue, please refer to the troubleshooting guidelines here at "
                                + "https:
                    }
                    return Mono.error(new ClientAuthenticationException("Failed to acquire token with"
                        + " VS code credential", null, t));
                })
                .map(MsalToken::new));
    }

    /**
     * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
     *
     * @param request the details of the token request
     * @param authorizationCode the oauth2 authorization code
     * @param redirectUrl the redirectUrl where the authorization code is sent to
     * @return a Publisher that emits an AccessToken
     */
    public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
                                                             URI redirectUrl) {
        AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder =
            AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
                .scopes(new HashSet<>(request.getScopes()))
                .tenant(IdentityUtil
                    .resolveTenantId(tenantId, request, options));
        if (request.getClaims() != null) {
            ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
            parametersBuilder.claims(customClaimRequest);
        }
        Mono<IAuthenticationResult> acquireToken;
        // A configured client secret selects the confidential-client path.
        if (clientSecret != null) {
            acquireToken = confidentialClientApplicationAccessor.getValue()
                .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build())));
        } else {
            acquireToken = publicClientApplicationAccessor.getValue()
                .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build())));
        }
        return acquireToken.onErrorMap(t -> new ClientAuthenticationException(
            "Failed to acquire token with authorization code", null, t)).map(MsalToken::new);
    }

    /**
     * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
     * credential will run a minimal local HttpServer at the given port, so {@code http://localhost:{port}} must be
     * listed as a valid reply URL for the application.
     *
     * @param request the details of the token request
     * @param port the port on which the HTTP server is listening
     * @param redirectUrl the redirect URL to listen on and receive security code
     * @param loginHint the username suggestion to pre-fill the login page's username/email address field
     * @return a Publisher that emits an AccessToken
     */
    public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
                                                              String redirectUrl, String loginHint) {
        URI redirectUri;
        String redirect;
        // Priority: explicit port > explicit redirect URL > bare localhost.
        if (port != null) {
            redirect = HTTP_LOCALHOST + ":" + port;
        } else if (redirectUrl != null) {
            redirect = redirectUrl;
        } else {
            redirect = HTTP_LOCALHOST;
        }
        try {
            redirectUri = new URI(redirect);
        } catch (URISyntaxException e) {
            return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e)));
        }
        InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
            InteractiveRequestParameters.builder(redirectUri)
                .scopes(new HashSet<>(request.getScopes()))
                .prompt(Prompt.SELECT_ACCOUNT)
                .tenant(IdentityUtil
                    .resolveTenantId(tenantId, request, options));
        if (request.getClaims() != null) {
            ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
            builder.claims(customClaimRequest);
        }
        if (loginHint != null) {
            builder.loginHint(loginHint);
        }
        Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue()
            .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build())));
        return acquireToken.onErrorMap(t -> new ClientAuthenticationException(
            "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new);
    }

    /**
     * Gets token from shared token cache. An account is selected by de-duplicating
     * cached accounts on home account id, optionally filtered by username; exactly
     * one match is required.
     *
     * @param request the details of the token request
     * @param username the username to match against cached accounts; may be null
     * @return a Publisher that emits an AccessToken
     */
    public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) {
        return publicClientApplicationAccessor.getValue()
            .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts())
                .onErrorMap(t -> new CredentialUnavailableException(
                    "Cannot get accounts from token cache. Error: " + t.getMessage(), t))
                .flatMap(set -> {
                    IAccount requestedAccount;
                    Map<String, IAccount> accounts = new HashMap<>(); // home account id -> account
                    if (set.isEmpty()) {
                        return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                            new CredentialUnavailableException("SharedTokenCacheCredential "
                                + "authentication unavailable. No accounts were found in the cache.")));
                    }
                    for (IAccount cached : set) {
                        if (username == null || username.equals(cached.username())) {
                            if (!accounts.containsKey(cached.homeAccountId())) {
                                accounts.put(cached.homeAccountId(), cached);
                            }
                        }
                    }
                    if (accounts.isEmpty()) {
                        return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                            + "authentication unavailable. No account matching the specified username: %s was "
                            + "found in the cache.", username)));
                    } else if (accounts.size() > 1) {
                        if (username == null) {
                            return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication "
                                + "unavailable. Multiple accounts were found in the cache. Use username and "
                                + "tenant id to disambiguate."));
                        } else {
                            return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                                + "authentication unavailable. Multiple accounts matching the specified username: "
                                + "%s were found in the cache.", username)));
                        }
                    } else {
                        requestedAccount = accounts.values().iterator().next();
                    }
                    return authenticateWithPublicClientCache(request, requestedAccount);
                }));
    }

    /**
     * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint.
     * The first, unauthenticated request is expected to fail with 401 and carry the
     * secret-file challenge in the WWW-Authenticate header.
     *
     * @param identityEndpoint the Identity endpoint to acquire token from
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint,
                                                                      TokenRequestContext request) {
        return Mono.fromCallable(() -> {
            HttpURLConnection connection = null;
            StringBuilder payload = new StringBuilder();
            payload.append("resource=");
            payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()),
                StandardCharsets.UTF_8.name()));
            payload.append("&api-version=");
            payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name()));
            URL url = new URL(String.format("%s?%s", identityEndpoint, payload));
            String secretKey = null;
            try {
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                connection.setRequestProperty("Metadata", "true");
                connection.connect();
                // NOTE(review): this Scanner's result is discarded and the Scanner is
                // never closed — verify against the upstream source.
                new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A");
            } catch (IOException e) {
                if (connection == null) {
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize "
                        + "Http URL connection to the endpoint.", null, e));
                }
                int status = connection.getResponseCode();
                if (status != 401) {
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401"
                        + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status),
                        null, e));
                }
                String realm = connection.getHeaderField("WWW-Authenticate");
                if (realm == null) {
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value"
                        + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint",
                        null));
                }
                int separatorIndex = realm.indexOf("=");
                if (separatorIndex == -1) {
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc 
Managed Identity Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken */
public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) {
    return clientAssertionAccessor.getValue()
        .flatMap(assertionToken -> Mono.fromCallable(() -> {
            // Token endpoint for the configured tenant, with trailing '/' trimmed from the authority.
            String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("")
                + "/" + tenantId + "/oauth2/v2.0/token";

            // client_credentials grant authenticated via the client assertion (JWT bearer).
            StringBuilder urlParametersBuilder = new StringBuilder();
            urlParametersBuilder.append("client_assertion=");
            urlParametersBuilder.append(assertionToken);
            urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type"
                + ":jwt-bearer");
            urlParametersBuilder.append("&client_id=");
            urlParametersBuilder.append(clientId);
            urlParametersBuilder.append("&grant_type=client_credentials");
            urlParametersBuilder.append("&scope=");
            urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0),
                StandardCharsets.UTF_8.name()));

            byte[] postData = urlParametersBuilder.toString().getBytes(StandardCharsets.UTF_8);

            HttpURLConnection connection = null;
            URL url = new URL(authorityUrl);
            try {
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("POST");
                connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
                connection.setRequestProperty("Content-Length", Integer.toString(postData.length));
                connection.setDoOutput(true);
                try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) {
                    outputStream.write(postData);
                }
                connection.connect();
                // try-with-resources closes the response stream (the original leaked the Scanner).
                try (Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A")) {
                    String result = s.hasNext() ? s.next() : "";
                    return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
                }
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }));
}

/**
 * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param identityHeader the identity header to acquire token with
 * @param thumbprint the certificate thumbprint trusted for the TLS connection to the endpoint
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint,
                                                                            String identityHeader,
                                                                            String thumbprint,
                                                                            TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        String resource = ScopeUtil.scopesToResource(request.getScopes());

        StringBuilder payload = new StringBuilder();
        payload.append("resource=");
        payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name()));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode(SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION,
            StandardCharsets.UTF_8.name()));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()));
        }
        if (resourceId != null) {
            payload.append("&mi_res_id=");
            payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name()));
        }

        HttpsURLConnection connection = null;
        try {
            URL url = new URL(String.format("%s?%s", identityEndpoint, payload));
            connection = (HttpsURLConnection) url.openConnection();
            // Service Fabric presents a self-signed certificate; trust it only by the supplied thumbprint.
            IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER);
            connection.setRequestMethod("GET");
            if (identityHeader != null) {
                connection.setRequestProperty("Secret", identityHeader);
            }
            connection.setRequestProperty("Metadata", "true");
            connection.connect();
            try (Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                .useDelimiter("\\A")) {
                String result = s.hasNext() ? s.next() : "";
                return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
            }
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}

/**
 * Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param identityHeader the identity header to acquire token with
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader,
                                                               TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        String endpointVersion = IDENTITY_ENDPOINT_VERSION;
        String resource = ScopeUtil.scopesToResource(request.getScopes());

        StringBuilder payload = new StringBuilder();
        payload.append("resource=");
        payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name()));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name()));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()));
        }
        if (resourceId != null) {
            payload.append("&mi_res_id=");
            payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name()));
        }

        HttpURLConnection connection = null;
        try {
            URL url = new URL(String.format("%s?%s", identityEndpoint, payload));
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            if (identityHeader != null) {
                // The 2019-08-01 endpoint takes the header as X-IDENTITY-HEADER; older ones as Secret.
                if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) {
                    connection.setRequestProperty("X-IDENTITY-HEADER", identityHeader);
                } else {
                    connection.setRequestProperty("Secret", identityHeader);
                }
            }
            connection.setRequestProperty("Metadata", "true");
            connection.connect();
            try (Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                .useDelimiter("\\A")) {
                String result = s.hasNext() ? s.next() : "";
                return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
            }
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}

/**
 * Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
    String resource = ScopeUtil.scopesToResource(request.getScopes());
    // IMDS may be upgrading on a 410 response; documented guidance is to allow up to ~70 seconds.
    final int imdsUpgradeTimeInMs = 70 * 1000;

    StringBuilder payload = new StringBuilder();
    try {
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name()));
        payload.append("&resource=");
        payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name()));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()));
        }
        if (resourceId != null) {
            payload.append("&mi_res_id=");
            payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name()));
        }
    } catch (IOException exception) {
        return Mono.error(exception);
    }

    String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("")
        + IdentityConstants.DEFAULT_IMDS_TOKENPATH;

    return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> {
        int retry = 1;
        while (retry <= options.getMaxRetry()) {
            URL url = null;
            HttpURLConnection connection = null;
            try {
                url = new URL(String.format("%s?%s", endpoint, payload));
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                connection.setRequestProperty("Metadata", "true");
                connection.connect();
                try (Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A")) {
                    String result = s.hasNext() ? s.next() : "";
                    return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
                }
            } catch (IOException exception) {
                if (connection == null) {
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        String.format("Could not connect to the url: %s.", url), exception));
                }
                int responseCode;
                try {
                    responseCode = connection.getResponseCode();
                } catch (Exception e) {
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established, "
                                + e.getMessage() + ".", e));
                }
                if (responseCode == 400) {
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established.", null));
                }
                // Retry on throttling / transient statuses.
                if (responseCode == 410 || responseCode == 429 || responseCode == 404
                    || (responseCode >= 500 && responseCode <= 599)) {
                    // NOTE(review): getNano() / 1000 yields microseconds, not milliseconds, and is 0 for
                    // whole-second durations — this looks like it should be toMillis(). Kept as-is to
                    // preserve behavior; TODO confirm the intended retry delay.
                    int retryTimeoutInMs = options.getRetryTimeout()
                        .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
                    retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs)
                        ? imdsUpgradeTimeInMs : retryTimeoutInMs;
                    retry++;
                    if (retry > options.getMaxRetry()) {
                        break;
                    } else {
                        sleep(retryTimeoutInMs);
                    }
                } else {
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        "Couldn't acquire access token from IMDS, verify your objectId, "
                            + "clientId or msiResourceId", exception));
                }
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }
        throw LOGGER.logExceptionAsError(new RuntimeException(
            String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry())));
    }));
}

// Probes the IMDS endpoint with a short connect timeout to decide whether managed identity is
// available on this host.
private Mono<Boolean> checkIMDSAvailable(String endpoint) {
    StringBuilder payload = new StringBuilder();
    try {
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name()));
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    return Mono.fromCallable(() -> {
        HttpURLConnection connection = null;
        URL url = new URL(String.format("%s?%s", endpoint, payload));
        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            connection.setConnectTimeout(500);
            connection.connect();
        } catch (Exception e) {
            throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                new CredentialUnavailableException(
                    "ManagedIdentityCredential authentication unavailable. "
                        + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e));
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
        return true;
    });
}

// Sleeps for the given number of milliseconds, converting an interrupt into an unchecked failure.
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        throw new IllegalStateException(ex);
    }
}

// Maps our ProxyOptions type onto the matching java.net.Proxy flavor.
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
    switch (options.getType()) {
        case SOCKS4:
        case SOCKS5:
            return new Proxy(Type.SOCKS, options.getAddress());
        case HTTP:
        default:
            return new Proxy(Type.HTTP, options.getAddress());
    }
}

// Returns a directory that is safe to launch CLI processes from, or null when none exists
// (Windows without %SystemRoot%).
private String getSafeWorkingDirectory() {
    if (isWindowsPlatform()) {
        if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) {
            return null;
        }
        return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
    }
    return DEFAULT_MAC_LINUX_PATH;
}

private boolean isWindowsPlatform() {
    return System.getProperty("os.name").contains("Windows");
}

// Masks access tokens in CLI output before it is logged or attached to exceptions.
private String redactInfo(String input) {
    return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****");
}

// Launches the platform browser at the given URL; best-effort, logs when no launcher is known.
// NOTE(review): Runtime.exec(String) with a concatenated URL — the URL must come from trusted
// configuration only; consider ProcessBuilder with an argument list.
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    if (os.contains("win")) {
        rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
    } else if (os.contains("mac")) {
        rt.exec("open " + url);
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec("xdg-open " + url);
    } else {
        LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}

// Wraps an exception in an already-failed CompletableFuture.
private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) {
    CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>();
    completableFuture.completeExceptionally(e);
    return completableFuture;
}

// Wires the MSAL HTTP transport to the configured pipeline/client. When only proxy options are
// set, no adapter is created here — presumably MSAL applies the proxy itself; TODO confirm.
private void initializeHttpPipelineAdapter() {
    HttpPipeline httpPipeline = options.getHttpPipeline();
    if (httpPipeline != null) {
        httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
        return;
    }
    HttpClient httpClient = options.getHttpClient();
    if (httpClient != null) {
        httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
    } else if (options.getProxyOptions() == null) {
        httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
    }
}

/**
 * Get the configured tenant id.
 *
 * @return the tenant id.
 */
public String getTenantId() {
    return tenantId;
}

/**
 * Get the configured client id.
 *
 * @return the client id.
 */
public String getClientId() {
    return clientId;
}

/**
 * Get the configured identity client options.
 *
 * @return the client options.
 */
public IdentityClientOptions getIdentityClientOptions() {
    return options;
}

// NOTE(review): throws NPE when tenantId is null — callers appear to guarantee a tenant; TODO confirm.
private boolean isADFSTenant() {
    return this.tenantId.equals(ADFS_TENANT);
}

// Loads the client certificate bytes from the configured path or stream; empty when neither is set.
private byte[] getCertificateBytes() throws IOException {
    if (certificatePath != null) {
        return Files.readAllBytes(Paths.get(certificatePath));
    } else if (certificate != null) {
        ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
        byte[] buffer = new byte[1024];
        int read = certificate.read(buffer, 0, buffer.length);
        while (read != -1) {
            outputStream.write(buffer, 0, read);
            read = certificate.read(buffer, 0, buffer.length);
        }
        return outputStream.toByteArray();
    } else {
        return new byte[0];
    }
}

// Opens the certificate for reading, preferring the configured file path over the raw stream.
private InputStream getCertificateInputStream() throws IOException {
    if (certificatePath != null) {
        return new BufferedInputStream(new FileInputStream(certificatePath));
    } else {
        return certificate;
    }
}
}
Use try-with-resources to replace the try/finally; additionally, the original finally block contained a return statement, which doesn't make sense (a return in finally swallows any pending exception).
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." 
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? 
LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); }
try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(),
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." 
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? 
LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); }
// IdentityClient wraps MSAL4J's PublicClientApplication / ConfidentialClientApplication and
// implements the Azure Identity authentication flows visible below: service principal
// (secret / PEM-PFX certificate / client assertion), interactive browser, device code,
// username/password, IntelliJ and VS Code caches, Azure CLI / PowerShell, OBO, and the
// shared token cache.
// NOTE(review): several string literals in this chunk appear truncated right after
// "http:" / "https:" (the troubleshooting-guide URLs, sometimes including the closing
// quote). The code is reproduced verbatim -- confirm the full literals against the
// canonical azure-identity source before compiling.
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final
String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client.
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ?
new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); }
// Lazily builds the MSAL ConfidentialClientApplication. Picks the client credential in
// priority order: client secret, then certificate (password-less PEM parsed for key +
// chain, otherwise password-protected PFX stream), then client assertion supplier;
// errors if none is configured. Also applies authority validation, the shared HTTP
// pipeline (or java.net proxy fallback), optional executor, optional persistent token
// cache, and optional regional authority / auto region detection.
private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path."
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ?
tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); }
// Reads the client assertion file (clientAssertionFilePath) as UTF-8 text; fails with
// IllegalStateException when no path was configured. Cached by clientAssertionAccessor
// with the configured (or 5-minute default) refresh timeout.
private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." )); } }); }
// Lazily builds the MSAL PublicClientApplication, mirroring the confidential-client
// setup (authority validation, HTTP pipeline or proxy fallback, optional executor) and
// advertising the "CP1" client capability unless disabled; the flatMap continuation
// then wires up the optional persistent token cache before building.
private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions();
// (continuation of getPublicClientApplication: attach the persistent token cache when
// configured, then build and, if caching, register the cache before emitting the app)
PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); }
// Acquires a token using credentials cached by the Azure Tools for IntelliJ plugin.
// Reads the plugin's auth-method details from the IntelliJ cache (optionally a KeePass
// database on Windows) and dispatches on the recorded method below.
public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available."
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); }
// Dispatch on the cached auth method: "SP" = service principal (build a confidential
// client from the cached client/key/tenant), "DC" = device code (redeem the cached
// refresh token); anything else is reported as unavailable.
String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. 
ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } }
// NOTE(review): the first Javadoc below ("with Azure CLI") is orphaned -- the Azure CLI
// method it documents is not adjacent here; verify against the canonical source.
/** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */
// Tries pwsh (PowerShell Core) first, then legacy powershell.exe on Windows; collects
// CredentialUnavailableExceptions from each attempt and, if all fail, folds them into a
// single combined CredentialUnavailableException.
public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". 
To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token on behalf of the configured user assertion (On-Behalf-Of flow). * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new 
// (continuation of getAccessTokenFromPowerShell: after verifying Az.Accounts >= 2.2.0 is
// installed, run Get-AzAccessToken for the requested resource, parse the JSON output's
// Token/ExpiresOn fields into an AccessToken, and always close the PowerShell session)
CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); }
// Builds the minimal HttpPipeline (retry + HTTP logging policies) around the supplied
// HttpClient.
private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. 
* * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */
// Silent (cached) acquisition from the public client: returns the cached token only if
// it expires after now + REFRESH_OFFSET (5 min); otherwise falls through to a second
// silent attempt with forceRefresh(true).
@SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * 
Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */
@SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */
// Redeems the refresh token cached by the VS Code Azure Account extension ("VS Code
// Azure" service entry for the given cloud); not supported for ADFS tenants.
public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. 
" + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */
// Uses the confidential client when a client secret is configured, otherwise the public
// client, to redeem the authorization code.
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */
// Redirect resolution precedence: explicit port (localhost:port) > explicit redirectUrl
// > plain localhost.
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); }
// Looks up the (optionally username-filtered) account in MSAL's shared token cache,
// de-duplicating by home account id, then performs a silent acquisition for it.
/** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. 
Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } }
// NOTE(review): the "no matching account" / "multiple accounts" paths below throw plain
// RuntimeException, unlike the sibling paths that use CredentialUnavailableException
// via LoggingUtil -- looks inconsistent; confirm intended behavior before changing.
if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint,
                                                                  TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        HttpURLConnection connection = null;
        StringBuilder payload = new StringBuilder();
        payload.append("resource=");
        payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()),
            StandardCharsets.UTF_8.name()));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name()));

        URL url = new URL(String.format("%s?%s", identityEndpoint, payload));
        String secretKey = null;

        // Azure Arc challenge-response: the first, unauthenticated request is EXPECTED to fail with a
        // 401 whose WWW-Authenticate header names a local file containing the secret key.
        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            connection.setRequestProperty("Metadata", "true");
            connection.connect();
            // Reading the stream forces the response to be consumed and the expected 401 to surface as an
            // IOException below; the Scanner itself is intentionally unused.
            new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A");
        } catch (IOException e) {
            if (connection == null) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize "
                    + "Http URL connection to the endpoint.", null, e));
            }
            int status = connection.getResponseCode();
            if (status != 401) {
                // Anything other than the expected challenge is a hard failure.
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401"
                    + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status),
                    null, e));
            }
            String realm = connection.getHeaderField("WWW-Authenticate");
            if (realm == null) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value"
                    + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint",
                    null));
            }
            // The header has the shape "Basic realm=<path-to-secret-file>"; everything after '=' is the path.
            int separatorIndex = realm.indexOf("=");
            if (separatorIndex == -1) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value"
                    + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint",
                    null));
            }
            String secretKeyPath = realm.substring(separatorIndex + 1);
            secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }

        if (secretKey == null) {
            throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value"
                + " in the response from Azure Arc Managed Identity Endpoint", null));
        }

        // Second request: present the secret from the challenge file as Basic authorization.
        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey));
            connection.setRequestProperty("Metadata", "true");
            connection.connect();

            Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                .useDelimiter("\\A");
            String result = scanner.hasNext() ? scanner.next() : "";

            return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}

/**
 * Asynchronously acquire a token from Azure AD by exchanging a client assertion (federated token) at the
 * OAuth2 token endpoint.
 * (NOTE(review): the original javadoc said "from the Azure Arc Managed Service Identity endpoint" — a
 * copy/paste error; this comment documents authenticateWithExchangeToken, which follows.)
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) {
    return clientAssertionAccessor.getValue()
        .flatMap(assertionToken -> Mono.fromCallable(() -> {
            // Raw client_credentials POST against the v2.0 token endpoint, authenticated via JWT assertion.
            String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("")
                + "/" + tenantId + "/oauth2/v2.0/token";

            StringBuilder urlParametersBuilder = new StringBuilder();
            urlParametersBuilder.append("client_assertion=");
            urlParametersBuilder.append(assertionToken);
            urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type"
                + ":jwt-bearer");
            urlParametersBuilder.append("&client_id=");
            urlParametersBuilder.append(clientId);
            urlParametersBuilder.append("&grant_type=client_credentials");
            urlParametersBuilder.append("&scope=");
            // Only the first scope is sent. NOTE(review): silently drops additional scopes — confirm intended.
            urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0),
                StandardCharsets.UTF_8.name()));

            String urlParams = urlParametersBuilder.toString();
            byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8);
            int postDataLength = postData.length;

            HttpURLConnection connection = null;
            URL url = new URL(authorityUrl);

            try {
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("POST");
                connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
                connection.setRequestProperty("Content-Length", Integer.toString(postDataLength));
                connection.setDoOutput(true);
                try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) {
                    outputStream.write(postData);
                }
                connection.connect();

                Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A");
                String result = s.hasNext() ? s.next() : "";
                return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }));
}

/**
 * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param identityHeader the identity header to acquire token with
 * @param thumbprint the certificate thumbprint used to pin the endpoint's TLS certificate
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint,
                                                                            String identityHeader,
                                                                            String thumbprint,
                                                                            TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        HttpsURLConnection connection = null;
        String endpoint = identityEndpoint;
        String headerValue = identityHeader;
        String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION;

        String resource = ScopeUtil.scopesToResource(request.getScopes());
        StringBuilder payload = new StringBuilder();

        payload.append("resource=");
        payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name()));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name()));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()));
        }
        if (resourceId != null) {
            payload.append("&mi_res_id=");
            payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name()));
        }
        try {
            URL url = new URL(String.format("%s?%s", endpoint, payload));
            connection = (HttpsURLConnection) url.openConnection();

            // Service Fabric uses a self-signed cert; trust is pinned to the provided thumbprint.
            IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER);
            connection.setRequestMethod("GET");
            if (headerValue != null) {
                connection.setRequestProperty("Secret", headerValue);
            }
            connection.setRequestProperty("Metadata", "true");
            connection.connect();

            Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                .useDelimiter("\\A");
            String result = s.hasNext() ? s.next() : "";

            return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}

/**
 * Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param identityHeader the identity header to acquire token with
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader,
                                                               TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        String endpoint;
        String headerValue;
        String endpointVersion;

        endpoint = identityEndpoint;
        headerValue = identityHeader;
        endpointVersion = IDENTITY_ENDPOINT_VERSION;

        String resource = ScopeUtil.scopesToResource(request.getScopes());
        HttpURLConnection connection = null;
        StringBuilder payload = new StringBuilder();

        payload.append("resource=");
        payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name()));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name()));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()));
        }
        if (resourceId != null) {
            payload.append("&mi_res_id=");
            payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name()));
        }
        try {
            URL url = new URL(String.format("%s?%s", endpoint, payload));
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            if (headerValue != null) {
                // The 2019-08-01 endpoint expects X-IDENTITY-HEADER; older endpoints use Secret.
                if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) {
                    connection.setRequestProperty("X-IDENTITY-HEADER", headerValue);
                } else {
                    connection.setRequestProperty("Secret", headerValue);
                }
            }
            connection.setRequestProperty("Metadata", "true");

            connection.connect();

            Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                .useDelimiter("\\A");
            String result = s.hasNext() ? s.next() : "";

            return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}

/**
 * Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
    String resource = ScopeUtil.scopesToResource(request.getScopes());
    StringBuilder payload = new StringBuilder();
    // IMDS asks clients to back off at least this long when a 410 (endpoint upgrading) is returned.
    final int imdsUpgradeTimeInMs = 70 * 1000;

    try {
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name()));
        payload.append("&resource=");
        payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name()));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()));
        }
        if (resourceId != null) {
            payload.append("&mi_res_id=");
            payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name()));
        }
    } catch (IOException exception) {
        return Mono.error(exception);
    }

    String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("")
        + IdentityConstants.DEFAULT_IMDS_TOKENPATH;

    // Probe the endpoint first so an absent IMDS fails fast as "credential unavailable".
    return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> {
        int retry = 1;
        while (retry <= options.getMaxRetry()) {
            URL url = null;
            HttpURLConnection connection = null;
            try {
                url = new URL(String.format("%s?%s", endpoint, payload));

                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                connection.setRequestProperty("Metadata", "true");
                connection.connect();

                Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A");
                String result = s.hasNext() ? s.next() : "";

                return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
            } catch (IOException exception) {
                if (connection == null) {
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        String.format("Could not connect to the url: %s.", url), exception));
                }
                int responseCode;
                try {
                    responseCode = connection.getResponseCode();
                } catch (Exception e) {
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established, "
                                + e.getMessage() + ".", e));
                }
                if (responseCode == 400) {
                    // 400 from IMDS means no identity is assigned — not retryable.
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established.", null));
                }
                // Retryable statuses: gone/throttled/not-found/server errors.
                if (responseCode == 410
                    || responseCode == 429
                    || responseCode == 404
                    || (responseCode >= 500 && responseCode <= 599)) {
                    // NOTE(review): Duration.getNano() / 1000 yields microseconds of the sub-second part, not
                    // milliseconds of the whole duration — looks like it should be toMillis(); confirm intent.
                    int retryTimeoutInMs = options.getRetryTimeout()
                        .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
                    // Per IMDS guidance, wait at least 70s on a 410 before retrying.
                    retryTimeoutInMs =
                        (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
                            : retryTimeoutInMs;
                    retry++;
                    if (retry > options.getMaxRetry()) {
                        break;
                    } else {
                        sleep(retryTimeoutInMs);
                    }
                } else {
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        "Couldn't acquire access token from IMDS, verify your objectId, "
                            + "clientId or msiResourceId", exception));
                }
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }
        throw LOGGER.logExceptionAsError(new RuntimeException(
            String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry())));
    }));
}

// Probes the IMDS endpoint with a short (500 ms) connect timeout; maps any failure to
// CredentialUnavailableException so the credential chain can continue.
private Mono<Boolean> checkIMDSAvailable(String endpoint) {
    StringBuilder payload = new StringBuilder();

    try {
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name()));
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    return Mono.fromCallable(() -> {
        HttpURLConnection connection = null;
        URL url = new URL(String.format("%s?%s", endpoint, payload));

        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            connection.setConnectTimeout(500);
            connection.connect();
        } catch (Exception e) {
            throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                new CredentialUnavailableException(
                    "ManagedIdentityCredential authentication unavailable. "
                        + "Connection to IMDS endpoint cannot be established, "
                        + e.getMessage() + ".", e));
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
        return true;
    });
}

// Thread.sleep wrapper; re-throws interruption as IllegalStateException (retry loop cannot continue).
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        throw new IllegalStateException(ex);
    }
}

// Maps the SDK's ProxyOptions onto a java.net.Proxy; SOCKS4/5 collapse to Type.SOCKS, everything else to HTTP.
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
    switch (options.getType()) {
        case SOCKS4:
        case SOCKS5:
            return new Proxy(Type.SOCKS, options.getAddress());
        case HTTP:
        default:
            return new Proxy(Type.HTTP, options.getAddress());
    }
}

// Returns a trusted working directory for spawning CLI processes, or null when %SystemRoot% is unset on Windows.
private String getSafeWorkingDirectory() {
    if (isWindowsPlatform()) {
        if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) {
            return null;
        }
        return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
    } else {
        return DEFAULT_MAC_LINUX_PATH;
    }
}

private boolean isWindowsPlatform() {
    return System.getProperty("os.name").contains("Windows");
}

// Masks access tokens in diagnostic output before it is logged.
private String redactInfo(String input) {
    return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****");
}

// Best-effort launch of the system browser for interactive flows; logs instead of failing on unknown OSes.
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();

    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    if (os.contains("win")) {
        rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
    } else if (os.contains("mac")) {
        rt.exec("open " + url);
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec("xdg-open " + url);
    } else {
        LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}

// Wraps an exception in an already-failed CompletableFuture for APIs that expect a future.
private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) {
    CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>();
    completableFuture.completeExceptionally(e);
    return completableFuture;
}

// Lazily builds the MSAL HTTP adapter. Precedence: explicit pipeline -> explicit HttpClient -> default client.
// When only proxy options are configured, the adapter stays null and callers fall back to builder.proxy(...).
private void initializeHttpPipelineAdapter() {
    HttpPipeline httpPipeline = options.getHttpPipeline();
    if (httpPipeline != null) {
        httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
    } else {
        HttpClient httpClient = options.getHttpClient();
        if (httpClient != null) {
            httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
        } else if (options.getProxyOptions() == null) {
            httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
        }
    }
}

/**
 * Get the configured tenant id.
 *
 * @return the tenant id.
 */
public String getTenantId() {
    return tenantId;
}

/**
 * Get the configured client id.
 *
 * @return the client id.
 */
public String getClientId() {
    return clientId;
}

/**
 * Get the configured identity client options.
 *
 * @return the client options.
 */
public IdentityClientOptions getIdentityClientOptions() {
    return options;
}

private boolean isADFSTenant() {
    return this.tenantId.equals(ADFS_TENANT);
}

// Loads the certificate bytes from the configured path, else drains the configured stream; empty when neither
// is set. Note the InputStream is consumed and not reset, so this is effectively single-use for streams.
private byte[] getCertificateBytes() throws IOException {
    if (certificatePath != null) {
        return Files.readAllBytes(Paths.get(certificatePath));
    } else if (certificate != null) {
        ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
        byte[] buffer = new byte[1024];
        int read = certificate.read(buffer, 0, buffer.length);
        while (read != -1) {
            outputStream.write(buffer, 0, read);
            read = certificate.read(buffer, 0, buffer.length);
        }
        return outputStream.toByteArray();
    } else {
        return new byte[0];
    }
}

// Opens a buffered stream over the certificate path, or hands back the injected stream as-is.
private InputStream getCertificateInputStream() throws IOException {
    if (certificatePath != null) {
        return new BufferedInputStream(new FileInputStream(certificatePath));
    } else {
        return certificate;
    }
}
}
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final 
String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? 
new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? 
tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." )); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); 
PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." 
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. 
ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". 
To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new 
CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. 
* * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * 
Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. 
" + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. 
Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc 
Managed Identity Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) 
.useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = 
new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } } }
Long term, we should look into replacing simple String.format calls with string concatenation. In most cases concatenation is easier to read, and it also performs better, since String.format must parse the format string to determine the replacement locations and how each one is handled.
/**
 * Asynchronously acquires an access token from the Azure Instance Metadata Service (IMDS)
 * endpoint for managed identity authentication.
 * <p>
 * Builds a URL-encoded query payload (api-version, resource, and optionally client_id /
 * mi_res_id), probes IMDS availability, then polls the endpoint with up to
 * {@code options.getMaxRetry()} attempts, backing off between retryable failures.
 *
 * @param request the details of the token request; its scopes are converted to a single
 *                IMDS resource string.
 * @return a Publisher that emits the deserialized {@link MSIToken} as an {@link AccessToken},
 *         or an error if IMDS is unreachable or all retries are exhausted.
 */
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
    String resource = ScopeUtil.scopesToResource(request.getScopes());
    StringBuilder payload = new StringBuilder();
    // HTTP 410 from IMDS means the service is upgrading; wait at least 70 seconds.
    final int imdsUpgradeTimeInMs = 70 * 1000;
    try {
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name()));
        payload.append("&resource=");
        payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name()));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()));
        }
        if (resourceId != null) {
            payload.append("&mi_res_id=");
            payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name()));
        }
    } catch (IOException exception) {
        // URLEncoder.encode declares UnsupportedEncodingException; surface it reactively.
        return Mono.error(exception);
    }
    // Normalize the configured IMDS authority host (strip trailing slashes) before
    // appending the token path.
    String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("")
        + IdentityConstants.DEFAULT_IMDS_TOKENPATH;
    return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> {
        int retry = 1;
        while (retry <= options.getMaxRetry()) {
            URL url = null;
            HttpURLConnection connection = null;
            try {
                // Simple concatenation instead of String.format: identical output,
                // avoids parsing the format string on every retry.
                url = new URL(endpoint + "?" + payload);
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                // IMDS requires the Metadata header to guard against SSRF-style requests.
                connection.setRequestProperty("Metadata", "true");
                connection.connect();
                // Read the entire response body in one token ("\\A" = beginning of input).
                Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A");
                String result = s.hasNext() ? s.next() : "";
                return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
            } catch (IOException exception) {
                if (connection == null) {
                    // openConnection itself failed; nothing to inspect, not retryable here.
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        "Could not connect to the url: " + url + ".", exception));
                }
                int responseCode;
                try {
                    responseCode = connection.getResponseCode();
                } catch (Exception e) {
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established, "
                                + e.getMessage() + ".", e));
                }
                if (responseCode == 400) {
                    // 400 from IMDS: no managed identity configured for this resource.
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established.", null));
                }
                // 410 (upgrading), 429 (throttled), 404 and 5xx are transient: back off and retry.
                if (responseCode == 410 || responseCode == 429 || responseCode == 404
                    || (responseCode >= 500 && responseCode <= 599)) {
                    // NOTE(review): getNano() returns only the nanos-of-second field, so for a
                    // whole-second Duration this is 0 and "/ 1000" yields microseconds, not
                    // millis — Duration.toMillis() is likely what was intended. Left as-is to
                    // preserve behavior; confirm against the retry-timeout contract.
                    int retryTimeoutInMs = options.getRetryTimeout()
                        .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
                    retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs)
                        ? imdsUpgradeTimeInMs : retryTimeoutInMs;
                    retry++;
                    if (retry > options.getMaxRetry()) {
                        break;
                    } else {
                        sleep(retryTimeoutInMs);
                    }
                } else {
                    // Any other status is a hard failure; do not retry.
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        "Couldn't acquire access token from IMDS, verify your objectId, "
                            + "clientId or msiResourceId", exception));
                }
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }
        throw LOGGER.logExceptionAsError(new RuntimeException(
            "MSI: Failed to acquire tokens after retrying " + options.getMaxRetry() + " times"));
    }));
}
url = new URL(String.format("%s?%s", endpoint, payload));
/**
 * Asynchronously acquires an access token from the Azure Instance Metadata Service (IMDS)
 * endpoint for managed identity authentication.
 *
 * @param request the details of the token request; its scopes are converted to a single
 *                IMDS resource string.
 * @return a Publisher that emits the deserialized token as an AccessToken, or an error if
 *         IMDS is unreachable or all retries are exhausted.
 */
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
    String resource = ScopeUtil.scopesToResource(request.getScopes());
    StringBuilder payload = new StringBuilder();
    // HTTP 410 from IMDS means the service is upgrading; wait at least 70 seconds.
    final int imdsUpgradeTimeInMs = 70 * 1000;
    try {
        // Build the URL-encoded query string: api-version, resource, and the optional
        // user-assigned identity selectors (client_id / mi_res_id).
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name()));
        payload.append("&resource=");
        payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name()));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()));
        }
        if (resourceId != null) {
            payload.append("&mi_res_id=");
            payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name()));
        }
    } catch (IOException exception) {
        // URLEncoder.encode declares UnsupportedEncodingException; surface it reactively.
        return Mono.error(exception);
    }
    // Normalize the configured IMDS authority host (strip trailing slashes) before
    // appending the token path.
    String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("")
        + IdentityConstants.DEFAULT_IMDS_TOKENPATH;
    return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> {
        int retry = 1;
        // Poll IMDS with bounded retries; transient status codes back off and loop.
        while (retry <= options.getMaxRetry()) {
            URL url = null;
            HttpURLConnection connection = null;
            try {
                url = new URL(String.format("%s?%s", endpoint, payload));
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                // IMDS requires the Metadata header to guard against SSRF-style requests.
                connection.setRequestProperty("Metadata", "true");
                connection.connect();
                // Read the entire response body in one token ("\\A" = beginning of input).
                Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A");
                String result = s.hasNext() ? s.next() : "";
                return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
            } catch (IOException exception) {
                if (connection == null) {
                    // openConnection itself failed; nothing to inspect, not retryable here.
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        String.format("Could not connect to the url: %s.", url), exception));
                }
                int responseCode;
                try {
                    responseCode = connection.getResponseCode();
                } catch (Exception e) {
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established, "
                                + e.getMessage() + ".", e));
                }
                if (responseCode == 400) {
                    // 400 from IMDS: no managed identity configured for this resource.
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established.", null));
                }
                // 410 (upgrading), 429 (throttled), 404 and 5xx are treated as transient.
                if (responseCode == 410 || responseCode == 429 || responseCode == 404
                    || (responseCode >= 500 && responseCode <= 599)) {
                    // NOTE(review): getNano() returns only the nanos-of-second field, so for a
                    // whole-second Duration this is 0 and "/ 1000" yields microseconds, not
                    // millis — Duration.toMillis() looks like the intent. Confirm against the
                    // retry-timeout contract before changing.
                    int retryTimeoutInMs = options.getRetryTimeout()
                        .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
                    retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs)
                        ? imdsUpgradeTimeInMs : retryTimeoutInMs;
                    retry++;
                    if (retry > options.getMaxRetry()) {
                        break;
                    } else {
                        sleep(retryTimeoutInMs);
                    }
                } else {
                    // Any other status is a hard failure; do not retry.
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        "Couldn't acquire access token from IMDS, verify your objectId, "
                            + "clientId or msiResourceId", exception));
                }
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }
        throw LOGGER.logExceptionAsError(new RuntimeException(
            String.format("MSI: Failed to acquire tokens after retrying %s times",
                options.getMaxRetry())));
    }));
}
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final 
String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? 
new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? 
tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." )); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); 
PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." 
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. 
ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." 
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? 
LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". 
To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new 
CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. 
* * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */
// Silent token acquisition from the public-client cache. First attempt is non-forced;
// the result is kept only if it is still fresh (expires later than now + REFRESH_OFFSET),
// otherwise switchIfEmpty retries with forceRefresh(true). When CAE claims are present the
// first attempt also forces a refresh, since a cached token cannot satisfy new claims.
@SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); }
/** * 
Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */
// Silent acquisition from the confidential-client cache. Unlike the public-client variant
// there is no forced-refresh fallback here: a stale result (within REFRESH_OFFSET of
// expiry) is filtered out and the Mono completes empty, leaving the caller to fall back
// to a full token request.
@SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); }
/** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */
// Device-code flow: MSAL invokes the supplied callback with the user/device codes and the
// verification URI (expiry computed from dc.expiresIn()); the future completes once the
// user finishes the browser challenge.
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); }
/** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */
// Redeems the refresh token stored by VS Code's Azure extension ("VS Code Azure" keychain
// entry for the given cloud). ADFS tenants are rejected up front. An
// MsalInteractionRequiredException is surfaced as CredentialUnavailableException so chained
// credentials can move on.
// NOTE(review): two error-message literals below are truncated in this copy (cut at
// "https:" with no closing quote) — the full troubleshooting URLs need restoring from the
// original source.
public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. " + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); }
/** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */
// Redeems an OAuth2 authorization code. The confidential client is used when a client
// secret is configured (web-app style registration); otherwise the public client is used.
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); }
/** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @return a Publisher that emits an AccessToken */
// Interactive browser flow. Redirect resolution precedence: explicit port on localhost,
// then the caller-supplied redirectUrl, then plain HTTP_LOCALHOST. Always prompts with
// SELECT_ACCOUNT; loginHint pre-fills the username field when provided.
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); }
/** * Gets token from shared token cache * */
// Looks up cached accounts, de-duplicated by home account id, optionally filtered by
// username. Exactly one match is required: zero matches or ambiguous multiples are errors
// (CredentialUnavailableException when the cache is empty, RuntimeException otherwise);
// the single match is then used for a silent public-client acquisition.
public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); }
/** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */
// Azure Arc MSI challenge/response protocol: an unauthenticated GET is expected to FAIL
// with 401 (reading the input stream forces the IOException into the catch block); the
// WWW-Authenticate header then carries "Basic realm=<file-path>", the secret is read from
// that file, and a second GET with "Authorization: Basic <secret>" returns the token JSON.
public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); }
/** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */
// Workload-identity federation: exchanges the client assertion (JWT bearer) for an access
// token by POSTing a client_credentials grant directly to the tenant's v2.0 token endpoint.
// NOTE(review): only request.getScopes().get(0) is sent — assumes a single scope; confirm
// against callers.
public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); }
/** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */
// Service Fabric MSI: HTTPS GET against the identity endpoint, trusting only the server
// certificate matching the supplied thumbprint (IdentitySslUtil); the identity header is
// passed via the "Secret" request header.
public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) 
.useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); }
/** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */
// App Service MSI (2019-08-01 protocol): GET to the identity endpoint with the identity
// header sent as "X-IDENTITY-HEADER" (or legacy "Secret" for other protocol versions).
// Optional clientId / resourceId narrow the identity when several are assigned.
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); }
/** * Probes whether the Virtual Machine IMDS endpoint is reachable. * * @param endpoint the IMDS endpoint to probe * @return a Publisher that emits {@code true} when a connection to the endpoint can be established */
// FIX(review): the previous javadoc here was a mismatched leftover ("@param request ...
// @return an AccessToken") that did not describe this Mono<Boolean>(String) probe; it may
// have belonged to an IMDS authenticate method missing from this copy — confirm upstream.
// Any connect failure is mapped to CredentialUnavailableException so chained credentials
// can skip managed identity quickly (500 ms connect timeout).
private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); }
// Sleeps for the given number of milliseconds.
// FIX(review): restore the thread's interrupt status before rethrowing — swallowing the
// interrupt breaks cooperative cancellation for callers up the stack (standard
// InterruptedException handling contract).
private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); throw new IllegalStateException(ex); } }
// Maps azure-core ProxyOptions onto a java.net.Proxy (SOCKS4/5 -> SOCKS, everything else -> HTTP).
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } }
// Returns a trusted working directory for spawning CLI processes (system32 on Windows,
// /bin/ elsewhere); null when %SystemRoot% is unset on Windows.
private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } }
private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); }
// Masks access-token values in CLI output before it is logged.
private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); }
// Opens the given URL in the platform browser; logs an error when no opener is known.
// NOTE(review): Runtime.exec(String) tokenizes on whitespace and the url is concatenated
// into the command — prefer ProcessBuilder(List<String>) if url can contain spaces or
// untrusted content; confirm url provenance before changing.
void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } }
// Wraps an exception in an already-failed CompletableFuture for MSAL supplier callbacks.
private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; }
// Lazily builds httpPipelineAdapter from the configured pipeline, else from the configured
// HttpClient, else (when no proxy is set) from a default HttpClient. When only
// ProxyOptions are set the adapter stays null and MSAL's own proxy support is used instead.
private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } }
/** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; }
/** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; }
/** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; }
private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); }
// Reads the certificate bytes from certificatePath, or drains the certificate InputStream,
// or returns an empty array when neither is configured.
private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return certificate == null ? new byte[0] : new byte[0]; } }
// Opens the certificate as a stream: a buffered file stream for certificatePath, otherwise
// the configured InputStream as-is.
private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } } }
// Package-private MSAL-backed client used by all azure-identity credential implementations.
// Holds the configured tenant/client/secret/certificate/assertion inputs plus lazily
// initialized (SynchronizedAccessor) MSAL public/confidential client applications.
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs";
// NOTE(review): the HTTP_LOCALHOST literal below is truncated in this copy (cut at "http:"
// with no closing quote) — presumably "http://localhost"; restore from the original source.
private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$");
// Immutable per-instance configuration captured by the constructor; the accessors below
// build the MSAL applications / client assertion lazily and cache them.
private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor;
/** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/
// Null tenantId defaults to "organizations"; null options default to a fresh
// IdentityClientOptions. The client-assertion accessor caches the parsed assertion for
// clientAssertionTimeout (default 5 minutes).
IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); }
// Builds the MSAL ConfidentialClientApplication. Credential precedence: client secret,
// then certificate (PEM when no password — single cert vs. chain — otherwise PFX stream),
// then client-assertion supplier; anything else is an IllegalArgumentException. Also wires
// authority validation, x5c, HTTP pipeline/proxy, executor, optional persistent token
// cache, and regional authority (auto-detect or explicit).
// NOTE(review): the IllegalArgumentException message literal is truncated in this copy
// (cut at "https:" with no closing quote) — restore the full troubleshooting URL.
private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); }
// Reads the client assertion (a JWT) from clientAssertionFilePath as UTF-8 text; fails
// when no path is configured.
private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." )); } }); }
// Builds the MSAL PublicClientApplication: authority + validation, HTTP pipeline/proxy,
// executor, and (unless disabled) the "CP1" client capability enabling Continuous Access
// Evaluation. NOTE(review): this method continues past the end of this chunk — the
// token-cache wiring that follows is not visible here.
private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); 
PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." 
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. 
ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." 
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? 
LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". 
To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new 
CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. 
* * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * 
Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. 
" + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. 
Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc 
Managed Identity Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) {
    // Reads the cached client assertion (federated token), then POSTs a
    // client_credentials grant to {authorityHost}/{tenantId}/oauth2/v2.0/token.
    return clientAssertionAccessor.getValue()
        .flatMap(assertionToken -> Mono.fromCallable(() -> {
            String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("")
                + "/" + tenantId + "/oauth2/v2.0/token";
            StringBuilder urlParametersBuilder = new StringBuilder();
            urlParametersBuilder.append("client_assertion=");
            urlParametersBuilder.append(assertionToken);
            urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type"
                + ":jwt-bearer");
            urlParametersBuilder.append("&client_id=");
            urlParametersBuilder.append(clientId);
            urlParametersBuilder.append("&grant_type=client_credentials");
            urlParametersBuilder.append("&scope=");
            // Only the first scope is sent; callers are expected to pass a single scope.
            urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0),
                StandardCharsets.UTF_8.name()));
            String urlParams = urlParametersBuilder.toString();
            byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8);
            int postDataLength = postData.length;
            HttpURLConnection connection = null;
            URL url = new URL(authorityUrl);
            try {
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("POST");
                connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
                connection.setRequestProperty("Content-Length", Integer.toString(postDataLength));
                connection.setDoOutput(true);
                try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) {
                    outputStream.write(postData);
                }
                connection.connect();
                // NOTE(review): this Scanner is never closed — resource leak.
                Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A");
                String result = s.hasNext() ? s.next() : "";
                return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }));
}

/**
 * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param identityHeader the identity header to acquire token with
 * @param thumbprint the thumbprint used to pin the endpoint's TLS certificate
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint,
                                                                            String identityHeader,
                                                                            String thumbprint,
                                                                            TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        HttpsURLConnection connection = null;
        String endpoint = identityEndpoint;
        String headerValue = identityHeader;
        String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION;
        String resource = ScopeUtil.scopesToResource(request.getScopes());
        StringBuilder payload = new StringBuilder();
        payload.append("resource=");
        payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name()));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name()));
        // User-assigned identity may be selected either by client id or by resource id.
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()));
        }
        if (resourceId != null) {
            payload.append("&mi_res_id=");
            payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name()));
        }
        try {
            URL url = new URL(String.format("%s?%s", endpoint, payload));
            connection = (HttpsURLConnection) url.openConnection();
            // Pin the endpoint's self-signed certificate by thumbprint.
            IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER);
            connection.setRequestMethod("GET");
            if (headerValue != null) {
                connection.setRequestProperty("Secret", headerValue);
            }
            connection.setRequestProperty("Metadata", "true");
            connection.connect();
            // NOTE(review): this Scanner is never closed — resource leak.
            Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                .useDelimiter("\\A");
            String result = s.hasNext() ? s.next() : "";
            return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}

/**
 * Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param identityHeader the identity header to acquire token with
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader,
                                                               TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        String endpoint;
        String headerValue;
        String endpointVersion;
        endpoint = identityEndpoint;
        headerValue = identityHeader;
        endpointVersion = IDENTITY_ENDPOINT_VERSION;
        String resource = ScopeUtil.scopesToResource(request.getScopes());
        HttpURLConnection connection = null;
        StringBuilder payload = new StringBuilder();
        payload.append("resource=");
        payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name()));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name()));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()));
        }
        if (resourceId != null) {
            payload.append("&mi_res_id=");
            payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name()));
        }
        try {
            URL url = new URL(String.format("%s?%s", endpoint, payload));
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            if (headerValue != null) {
                // The 2019-08-01 endpoint takes X-IDENTITY-HEADER; older ones take Secret.
                if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) {
                    connection.setRequestProperty("X-IDENTITY-HEADER", headerValue);
                } else {
                    connection.setRequestProperty("Secret", headerValue);
                }
            }
            connection.setRequestProperty("Metadata", "true");
            connection.connect();
            // NOTE(review): this Scanner is never closed — resource leak.
            Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                .useDelimiter("\\A");
            String result = s.hasNext() ? s.next() : "";
            return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}

/**
 * Probes the Virtual Machine IMDS endpoint to check whether it is reachable.
 *
 * @param endpoint the IMDS endpoint to probe
 * @return a Publisher that emits {@code true} when a connection could be established;
 * errors with a {@link CredentialUnavailableException} otherwise
 */
private Mono<Boolean> checkIMDSAvailable(String endpoint) {
    StringBuilder payload = new StringBuilder();
    try {
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name()));
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    return Mono.fromCallable(() -> {
        HttpURLConnection connection = null;
        URL url = new URL(String.format("%s?%s", endpoint, payload));
        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            // Short timeout: IMDS is on-box; if it does not answer quickly it is absent.
            connection.setConnectTimeout(500);
            connection.connect();
        } catch (Exception e) {
            throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                new CredentialUnavailableException(
                    "ManagedIdentityCredential authentication unavailable. "
                        + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e));
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
        return true;
    });
}

// Sleeps the current thread; interruption is surfaced as IllegalStateException.
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        throw new IllegalStateException(ex);
    }
}

// Converts azure-core ProxyOptions into a java.net.Proxy for MSAL.
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
    switch (options.getType()) {
        case SOCKS4:
        case SOCKS5:
            return new Proxy(Type.SOCKS, options.getAddress());
        case HTTP:
        default:
            return new Proxy(Type.HTTP, options.getAddress());
    }
}

// Returns a well-known directory to run CLI commands from, or null when
// %SystemRoot% is unavailable on Windows.
private String getSafeWorkingDirectory() {
    if (isWindowsPlatform()) {
        if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) {
            return null;
        }
        return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
    } else {
        return DEFAULT_MAC_LINUX_PATH;
    }
}

private boolean isWindowsPlatform() {
    return System.getProperty("os.name").contains("Windows");
}

// Masks any "accessToken" values before output is logged or thrown.
private String redactInfo(String input) {
    return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****");
}

// Opens the given URL in the platform browser.
// NOTE(review): Runtime.exec(String) with a concatenated URL — prefer
// ProcessBuilder(List<String>) to avoid argument-injection via the URL.
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    if (os.contains("win")) {
        rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
    } else if (os.contains("mac")) {
        rt.exec("open " + url);
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec("xdg-open " + url);
    } else {
        LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}

// Wraps an exception into an already-failed MSAL future.
private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) {
    CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>();
    completableFuture.completeExceptionally(e);
    return completableFuture;
}

// Lazily creates the MSAL HTTP adapter from, in order of preference: a
// user-supplied pipeline, a user-supplied HttpClient, or a default client.
// When only proxy options are set, the adapter is intentionally left null so
// the MSAL builder falls back to java.net proxying.
private void initializeHttpPipelineAdapter() {
    HttpPipeline httpPipeline = options.getHttpPipeline();
    if (httpPipeline != null) {
        httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
    } else {
        HttpClient httpClient = options.getHttpClient();
        if (httpClient != null) {
            httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
        } else if (options.getProxyOptions() == null) {
            httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
        }
    }
}

/**
 * Get the configured tenant id.
 *
 * @return the tenant id.
 */
public String getTenantId() {
    return tenantId;
}

/**
 * Get the configured client id.
 *
 * @return the client id.
 */
public String getClientId() {
    return clientId;
}

/**
 * Get the configured identity client options.
 *
 * @return the client options.
 */
public IdentityClientOptions getIdentityClientOptions() {
    return options;
}

private boolean isADFSTenant() {
    return this.tenantId.equals(ADFS_TENANT);
}

// Reads the certificate bytes either from the configured file path or from the
// provided InputStream; returns an empty array when neither is configured.
private byte[] getCertificateBytes() throws IOException {
    if (certificatePath != null) {
        return Files.readAllBytes(Paths.get(certificatePath));
    } else if (certificate != null) {
        ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
        byte[] buffer = new byte[1024];
        int read = certificate.read(buffer, 0, buffer.length);
        while (read != -1) {
            outputStream.write(buffer, 0, read);
            read = certificate.read(buffer, 0, buffer.length);
        }
        return outputStream.toByteArray();
    } else {
        return new byte[0];
    }
}

// Opens the certificate as a stream; buffered to avoid byte-by-byte file reads.
private InputStream getCertificateInputStream() throws IOException {
    if (certificatePath != null) {
        return new BufferedInputStream(new FileInputStream(certificatePath));
    } else {
        return certificate;
    }
}
}
Use a `BufferedInputStream` to avoid reading the file byte by byte, which has much higher overhead.
private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } }
return new BufferedInputStream(new FileInputStream(certificatePath));
private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } }
class IdentityClient {
    private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
    private static final Random RANDOM = new Random();
    // Shell used to launch the Azure CLI, per platform.
    private static final String WINDOWS_STARTER = "cmd.exe";
    private static final String LINUX_MAC_STARTER = "/bin/sh";
    private static final String WINDOWS_SWITCHER = "/c";
    private static final String LINUX_MAC_SWITCHER = "-c";
    // Markers indicating the 'az' executable is not installed.
    private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized";
    private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found");
    private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot");
    private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe";
    private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe";
    private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh";
    private static final String DEFAULT_MAC_LINUX_PATH = "/bin/";
    private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5);
    private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01";
    private static final String MSI_ENDPOINT_VERSION = "2017-09-01";
    private static final String ADFS_TENANT = "adfs";
    // NOTE(review): the literal below is truncated in this copy of the file (the
    // text after "http:" — presumably "//localhost" — was stripped along with
    // line comments). Restore from upstream before compiling.
    private static final String HTTP_LOCALHOST = "http:
    private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview";
    private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class);
    private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)");
    private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$");

    private final IdentityClientOptions options;
    private final String tenantId;
    private final String clientId;
    private final String resourceId;
    private final String clientSecret;
    private final String clientAssertionFilePath;
    private final InputStream certificate;
    private final String certificatePath;
    private final Supplier<String> clientAssertionSupplier;
    private final String certificatePassword;
    // Lazily initialized by initializeHttpPipelineAdapter(); may stay null.
    private HttpPipelineAdapter httpPipelineAdapter;
    // Lazily-built MSAL applications and the cached client assertion.
    private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor;
    private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
    private final SynchronizedAccessor<String> clientAssertionAccessor;

    /**
     * Creates an IdentityClient with the given options.
     *
     * @param tenantId the tenant ID of the application.
     * @param clientId the client ID of the application.
     * @param clientSecret the client secret of the application.
     * @param resourceId the resource ID of the application
     * @param certificatePath the path to the PKCS12 or PEM certificate of the application.
     * @param certificate the PKCS12 or PEM certificate of the application.
     * @param certificatePassword the password protecting the PFX certificate.
     * @param isSharedTokenCacheCredential Indicate whether the credential is
     * {@link com.azure.identity.SharedTokenCacheCredential} or not.
     * @param clientAssertionTimeout the timeout to use for the client assertion.
     * @param options the options configuring the client.
     */
    IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath,
                   String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
                   InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
                   Duration clientAssertionTimeout, IdentityClientOptions options) {
        // Default to the "organizations" multi-tenant authority when unspecified.
        if (tenantId == null) {
            tenantId = "organizations";
        }
        if (options == null) {
            options = new IdentityClientOptions();
        }
        this.tenantId = tenantId;
        this.clientId = clientId;
        this.resourceId = resourceId;
        this.clientSecret = clientSecret;
        this.clientAssertionFilePath = clientAssertionFilePath;
        this.certificatePath = certificatePath;
        this.certificate = certificate;
        this.certificatePassword = certificatePassword;
        this.clientAssertionSupplier = clientAssertionSupplier;
        this.options = options;
        this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() ->
            getPublicClientApplication(isSharedTokenCacheCredential));
        this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() ->
            getConfidentialClientApplication());
        // The parsed client assertion is cached for 5 minutes unless a custom
        // timeout was supplied.
        this.clientAssertionAccessor = clientAssertionTimeout == null
            ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
            : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
    }

    // Builds the MSAL ConfidentialClientApplication from whichever credential is
    // configured: client secret, certificate (PEM or password-protected PFX), or
    // client assertion supplier.
    private Mono<ConfidentialClientApplication> getConfidentialClientApplication() {
        return Mono.defer(() -> {
            if (clientId == null) {
                return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException(
                    "A non-null value for client ID must be provided for user authentication.")));
            }
            String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("")
                + "/" + tenantId;
            IClientCredential credential;
            if (clientSecret != null) {
                credential = ClientCredentialFactory.createFromSecret(clientSecret);
            } else if (certificate != null || certificatePath != null) {
                try {
                    // No password => PEM: extract the key and certificate chain.
                    if (certificatePassword == null) {
                        byte[] pemCertificateBytes = getCertificateBytes();
                        List<X509Certificate> x509CertificateList =
                            CertificateUtil.publicKeyFromPem(pemCertificateBytes);
                        PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes);
                        if (x509CertificateList.size() == 1) {
                            credential = ClientCredentialFactory.createFromCertificate(
                                privateKey, x509CertificateList.get(0));
                        } else {
                            credential = ClientCredentialFactory.createFromCertificateChain(
                                privateKey, x509CertificateList);
                        }
                    } else {
                        // Password present => PFX/PKCS12 stream.
                        try (InputStream pfxCertificateStream = getCertificateInputStream()) {
                            credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream,
                                certificatePassword);
                        }
                    }
                } catch (IOException | GeneralSecurityException e) {
                    return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(
                        "Failed to parse the certificate for the credential: " + e.getMessage(), e)));
                }
            } else if (clientAssertionSupplier != null) {
                credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get());
            } else {
                return Mono.error(LOGGER.logExceptionAsError(
                    new IllegalArgumentException("Must provide client secret or client certificate path."
                        // NOTE(review): the string below is truncated in this copy of the
                        // file (the troubleshooting URL after "https:" and the closing
                        // tokens were stripped along with line comments). Restore from
                        // upstream before compiling.
                        + " To mitigate this issue, please refer to the troubleshooting guidelines here at "
                        + "https:
            }
            ConfidentialClientApplication.Builder applicationBuilder =
                ConfidentialClientApplication.builder(clientId, credential);
            try {
                applicationBuilder = applicationBuilder.authority(authorityUrl)
                    .validateAuthority(options.getAuthorityValidation());
            } catch (MalformedURLException e) {
                return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e)));
            }
            applicationBuilder.sendX5c(options.isIncludeX5c());
            initializeHttpPipelineAdapter();
            if (httpPipelineAdapter != null) {
                applicationBuilder.httpClient(httpPipelineAdapter);
            } else {
                applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            }
            if (options.getExecutorService() != null) {
                applicationBuilder.executorService(options.getExecutorService());
            }
            TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions();
            PersistentTokenCacheImpl tokenCache = null;
            if (tokenCachePersistenceOptions != null) {
                try {
                    tokenCache = new PersistentTokenCacheImpl()
                        .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed())
                        .setName(tokenCachePersistenceOptions.getName());
                    applicationBuilder.setTokenCacheAccessAspect(tokenCache);
                } catch (Throwable t) {
                    return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException(
                        "Shared token cache is unavailable in this environment.", null, t)));
                }
            }
            if (options.getRegionalAuthority() != null) {
                if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) {
                    applicationBuilder.autoDetectRegion(true);
                } else {
                    applicationBuilder.azureRegion(options.getRegionalAuthority().toString());
                }
            }
            ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build();
            // Register the persistent cache (if any) before handing out the application.
            return tokenCache != null
                ? tokenCache.registerCache()
                    .map(ignored -> confidentialClientApplication)
                : Mono.just(confidentialClientApplication);
        });
    }

    // Reads the client assertion (federated token) from the configured file path.
    private Mono<String> parseClientAssertion() {
        return Mono.fromCallable(() -> {
            if (clientAssertionFilePath != null) {
                byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath));
                return new String(encoded, StandardCharsets.UTF_8);
            } else {
                throw LOGGER.logExceptionAsError(new IllegalStateException(
                    "Client Assertion File Path is not provided."
                        + " It should be provided to authenticate with client assertion."
                ));
            }
        });
    }

    // Builds the MSAL PublicClientApplication, wiring in HTTP transport, executor,
    // CP1 client capability, and the optional persistent token cache.
    private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) {
        return Mono.defer(() -> {
            if (clientId == null) {
                throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                    "A non-null value for client ID must be provided for user authentication."));
            }
            String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("")
                + "/" + tenantId;
            PublicClientApplication.Builder publicClientApplicationBuilder =
                PublicClientApplication.builder(clientId);
            try {
                publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl)
                    .validateAuthority(options.getAuthorityValidation());
            } catch (MalformedURLException e) {
                throw LOGGER.logExceptionAsWarning(new IllegalStateException(e));
            }
            initializeHttpPipelineAdapter();
            if (httpPipelineAdapter != null) {
                publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
            } else {
                publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            }
            if (options.getExecutorService() != null) {
                publicClientApplicationBuilder.executorService(options.getExecutorService());
            }
            // Advertise the CP1 capability (CAE support) unless explicitly disabled.
            if (!options.isCp1Disabled()) {
                Set<String> set = new HashSet<>(1);
                set.add("CP1");
                publicClientApplicationBuilder.clientCapabilities(set);
            }
            return Mono.just(publicClientApplicationBuilder);
        }).flatMap(builder -> {
            TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions();
            PersistentTokenCacheImpl tokenCache = null;
            if (tokenCachePersistenceOptions != null) {
                try {
                    tokenCache = new PersistentTokenCacheImpl()
                        .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed())
                        .setName(tokenCachePersistenceOptions.getName());
                    builder.setTokenCacheAccessAspect(tokenCache);
                } catch (Throwable t) {
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
                        "Shared token cache is unavailable in this environment.", null, t));
                }
            }
            PublicClientApplication publicClientApplication = builder.build();
            return tokenCache != null
                ? tokenCache.registerCache()
                    .map(ignored -> publicClientApplication)
                : Mono.just(publicClientApplication);
        });
    }

    // Authenticates using credentials cached by the Azure Tools for IntelliJ
    // plugin: either a service principal ("SP") or device-code refresh token ("DC").
    public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) {
        try {
            IntelliJCacheAccessor cacheAccessor =
                new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath());
            IntelliJAuthMethodDetails authDetails;
            try {
                authDetails = cacheAccessor.getAuthDetailsIfAvailable();
            } catch (CredentialUnavailableException e) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJ Authentication not available.", e)));
            }
            if (authDetails == null) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJ Authentication not available."
                        + " Please log in with Azure Tools for IntelliJ plugin in the IDE.")));
            }
            String authType = authDetails.getAuthMethod();
            if ("SP".equalsIgnoreCase(authType)) {
                // Service principal details are stored by the plugin in a cred file.
                Map<String, String> spDetails = cacheAccessor
                    .getIntellijServicePrincipalDetails(authDetails.getCredFilePath());
                String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant");
                try {
                    ConfidentialClientApplication.Builder applicationBuilder =
                        ConfidentialClientApplication.builder(spDetails.get("client"),
                            ClientCredentialFactory.createFromSecret(spDetails.get("key")))
                            .authority(authorityUrl).validateAuthority(options.getAuthorityValidation());
                    if (httpPipelineAdapter != null) {
                        applicationBuilder.httpClient(httpPipelineAdapter);
                    } else if (options.getProxyOptions() != null) {
                        applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
                    }
                    if (options.getExecutorService() != null) {
                        applicationBuilder.executorService(options.getExecutorService());
                    }
                    ConfidentialClientApplication application = applicationBuilder.build();
                    return Mono.fromFuture(application.acquireToken(
                        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                            .build())).map(MsalToken::new);
                } catch (MalformedURLException e) {
                    return Mono.error(e);
                }
            } else if ("DC".equalsIgnoreCase(authType)) {
                LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools"
                    + " for IntelliJ Plugin.");
                if (isADFSTenant()) {
                    LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and"
                        + " the ADFS tenants are not supported via IntelliJ Authentication currently.");
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException("IntelliJCredential "
                            + "authentication unavailable. ADFS tenant/authorities are not supported.")));
                }
                try {
                    // Redeem the plugin's cached refresh token for an access token.
                    JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials();
                    String refreshToken = intelliJCredentials.get("refreshToken").textValue();
                    RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder =
                        RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken);
                    if (request.getClaims() != null) {
                        ClaimsRequest customClaimRequest =
                            CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
                        refreshTokenParametersBuilder.claims(customClaimRequest);
                    }
                    return publicClientApplicationAccessor.getValue()
                        .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build()))
                            .map(MsalToken::new));
                } catch (CredentialUnavailableException e) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e));
                }
            } else {
                LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication"
                    + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one"
                    + " of those schemes from Azure Tools for IntelliJ plugin.");
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJ Authentication not available."
                        + " Please login with Azure Tools for IntelliJ plugin in the IDE.")));
            }
        } catch (IOException e) {
            return Mono.error(e);
        }
    }

    /**
     * Asynchronously acquire a token from Active Directory with Azure CLI.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
        StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
        String scopes = ScopeUtil.scopesToResource(request.getScopes());
        try {
            // Reject malformed scopes before they are handed to a shell.
            ScopeUtil.validateScope(scopes);
        } catch (IllegalArgumentException ex) {
            return Mono.error(LOGGER.logExceptionAsError(ex));
        }
        azCommand.append(scopes);
        String tenant = IdentityUtil.resolveTenantId(null, request, options);
        if (!CoreUtils.isNullOrEmpty(tenant)) {
            // NOTE(review): missing leading space — this produces "<resource>--tenant <id>"
            // with no separator; should likely be " --tenant ". Confirm against upstream.
            azCommand.append("--tenant ").append(tenant);
        }
        AccessToken token;
        try {
            String starter;
            String switcher;
            if (isWindowsPlatform()) {
                starter = WINDOWS_STARTER;
                switcher = WINDOWS_SWITCHER;
            } else {
                starter = LINUX_MAC_STARTER;
                switcher = LINUX_MAC_SWITCHER;
            }
            ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString());
            String workingDirectory = getSafeWorkingDirectory();
            if (workingDirectory != null) {
                builder.directory(new File(workingDirectory));
            } else {
                // NOTE(review): string truncated in this copy (URL after "https:" and
                // closing tokens stripped with line comments). Restore from upstream.
                throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be"
                    + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting "
                    + " guidelines here at https:
            }
            builder.redirectErrorStream(true);
            Process process = builder.start();
            StringBuilder output = new StringBuilder();
            try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(),
                StandardCharsets.UTF_8.name()))) {
                String line;
                while (true) {
                    line = reader.readLine();
                    if (line == null) {
                        break;
                    }
                    if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE)
                        || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) {
                        // NOTE(review): string truncated in this copy — see note above.
                        throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                            new CredentialUnavailableException(
                                "AzureCliCredential authentication unavailable. Azure CLI not installed."
                                    + "To mitigate this issue, please refer to the troubleshooting guidelines here at "
                                    + "https:
                    }
                    output.append(line);
                }
            }
            String processOutput = output.toString();
            process.waitFor(10, TimeUnit.SECONDS);
            if (process.exitValue() != 0) {
                if (processOutput.length() > 0) {
                    // Never surface raw CLI output: it may contain the access token.
                    String redactedOutput = redactInfo(processOutput);
                    if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) {
                        // NOTE(review): string truncated in this copy — see note above.
                        throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                            new CredentialUnavailableException(
                                "AzureCliCredential authentication unavailable."
                                    + " Please run 'az login' to set up account. To further mitigate this"
                                    + " issue, please refer to the troubleshooting guidelines here at "
                                    + "https:
                    }
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
                } else {
                    throw LOGGER.logExceptionAsError(
                        new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
                }
            }
            LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the"
                + " response into an Access Token.");
            Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
                SerializerEncoding.JSON);
            String accessToken = objectMap.get("accessToken");
            String time = objectMap.get("expiresOn");
            // CLI returns a local "yyyy-MM-dd HH:mm:ss.ffffff" timestamp; normalize
            // it to an ISO local date-time, then convert to UTC.
            String timeToSecond = time.substring(0, time.indexOf("."));
            String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
            OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
                .atZone(ZoneId.systemDefault())
                .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
            token = new AccessToken(accessToken, expiresOn);
        } catch (IOException | InterruptedException e) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(e));
        } catch (RuntimeException e) {
            return Mono.error(e instanceof CredentialUnavailableException
                ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
                : LOGGER.logExceptionAsError(e));
        }
        return Mono.just(token);
    }

    /**
     * Asynchronously acquire a token from Active Directory with Azure Power Shell.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) {
        List<CredentialUnavailableException> exceptions = new ArrayList<>(2);
        // Try pwsh (PowerShell Core) first, then fall back to legacy powershell.exe
        // on Windows; unavailability errors are collected and merged at the end.
        PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows()
            ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE);
        PowershellManager legacyPowerShellManager = Platform.isWindows()
            ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null;
        List<PowershellManager> powershellManagers = new ArrayList<>(2);
        powershellManagers.add(defaultPowerShellManager);
        if (legacyPowerShellManager != null) {
            powershellManagers.add(legacyPowerShellManager);
        }
        return Flux.fromIterable(powershellManagers)
            .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager)
                .onErrorResume(t -> {
                    if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) {
                        // NOTE(review): string truncated in this copy (URL after
                        // "https:" stripped with line comments). Restore from upstream.
                        return Mono.error(new ClientAuthenticationException(
                            "Azure Powershell authentication failed. Error Details: " + t.getMessage()
                                + ". To mitigate this issue, please refer to the troubleshooting guidelines here at "
                                + "https:
                            null, t));
                    }
                    exceptions.add((CredentialUnavailableException) t);
                    return Mono.empty();
                }), 1)
            .next()
            .switchIfEmpty(Mono.defer(() -> {
                // All managers failed: fold the collected errors into one message.
                CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
                for (int z = exceptions.size() - 2; z >= 0; z--) {
                    CredentialUnavailableException current = exceptions.get(z);
                    last = new CredentialUnavailableException("Azure PowerShell authentication failed using default"
                        + "powershell(pwsh) with following error: " + current.getMessage()
                        + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)"
                        + " with following error: " + last.getMessage(), last.getCause());
                }
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last)));
            }));
    }

    /**
     * Asynchronously acquire a token on behalf of another user (OBO flow), using
     * the user assertion configured on the client options.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) {
        return confidentialClientApplicationAccessor.getValue()
            .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters
                .builder(new HashSet<>(request.getScopes()), options.getUserAssertion())
                .tenant(IdentityUtil.resolveTenantId(tenantId, request, options))
                .build()))
                .map(MsalToken::new));
    }

    // Runs Get-AzAccessToken in the given PowerShell session and parses the JSON
    // output into an AccessToken; errors map to CredentialUnavailableException.
    private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request,
                                                           PowershellManager powershellManager) {
        return powershellManager.initSession()
            .flatMap(manager -> {
                String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru";
                return manager.runCommand(azAccountsCommand)
                    .flatMap(output -> {
                        if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded "
                            + "because no valid module file")) {
                            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                                new CredentialUnavailableException(
                                    "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to"
                                        + " use Azure PowerShell Credential.")));
                        }
                        LOGGER.verbose("Az.accounts module was found installed.");
                        StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl ");
                        accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes()));
                        accessTokenCommand.append(" | ConvertTo-Json");
                        // NOTE(review): 'command' is never used below — dead local.
                        String command = accessTokenCommand.toString();
                        LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure "
                            + "Powershell to retrieve the Access Token.", accessTokenCommand);
                        return manager.runCommand(accessTokenCommand.toString())
                            .flatMap(out -> {
                                if (out.contains("Run Connect-AzAccount to login")) {
                                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                                        new CredentialUnavailableException(
                                            "Run Connect-AzAccount to login to Azure account in PowerShell.")));
                                }
                                try {
                                    LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the "
                                        + "received response from Azure Powershell.");
                                    Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class,
                                        SerializerEncoding.JSON);
                                    String accessToken = objectMap.get("Token");
                                    String time = objectMap.get("ExpiresOn");
                                    OffsetDateTime expiresOn = OffsetDateTime.parse(time)
                                        .withOffsetSameInstant(ZoneOffset.UTC);
                                    return Mono.just(new AccessToken(accessToken, expiresOn));
                                } catch (IOException e) {
                                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                                        new CredentialUnavailableException(
                                            "Encountered error when deserializing response from Azure Power Shell.", e)));
                                }
                            });
                    });
            }).doFinally(ignored -> powershellManager.close());
    }

    /**
     * Asynchronously acquire a token from Active Directory with a client secret.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) {
        return confidentialClientApplicationAccessor.getValue()
            .flatMap(confidentialClient -> Mono.fromFuture(() -> {
                ClientCredentialParameters.ClientCredentialParametersBuilder builder =
                    ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                        .tenant(IdentityUtil
                            .resolveTenantId(tenantId, request, options));
                return confidentialClient.acquireToken(builder.build());
            }
            )).map(MsalToken::new);
    }

    // Builds the default azure-core HTTP pipeline (retry + logging) around the
    // supplied HttpClient, for use by the MSAL HTTP adapter.
    private HttpPipeline setupPipeline(HttpClient httpClient) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        HttpLogOptions httpLogOptions = new HttpLogOptions();
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(new RetryPolicy());
        HttpPolicyProviders.addAfterRetryPolicies(policies);
        policies.add(new HttpLoggingPolicy(httpLogOptions));
        return new HttpPipelineBuilder().httpClient(httpClient)
            .policies(policies.toArray(new HttpPipelinePolicy[0])).build();
    }

    /**
     * Asynchronously acquire a token from Active Directory with a username and a password.
* * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * 
Asynchronously acquire a token from the currently logged in client.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    @SuppressWarnings("deprecation")
    public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) {
        return confidentialClientApplicationAccessor.getValue()
            .flatMap(confidentialClient -> Mono.fromFuture(() -> {
                SilentParameters.SilentParametersBuilder parametersBuilder =
                    SilentParameters.builder(new HashSet<>(request.getScopes()))
                        .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
                try {
                    return confidentialClient.acquireTokenSilently(parametersBuilder.build());
                } catch (MalformedURLException e) {
                    return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
                }
            }).map(ar -> (AccessToken) new MsalToken(ar))
                // Emit empty (instead of a near-expiry token) so callers fall through to a fresh acquisition.
                .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))));
    }

    /**
     * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
     * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
     * different device.
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. 
" + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. 
Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc 
Managed Identity Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) 
.useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = 
new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final 
String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? 
new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? 
tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." )); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); 
PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." 
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. 
ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. 
     * NOTE(review): collapsed extraction — the lines below are the tail of a
     * Javadoc block opened on the previous source line, and several URL string
     * literals in the body are truncated at "https:".
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
        // Shells out to `az account get-access-token --output json` and parses
        // the JSON it prints.
        StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
        String scopes = ScopeUtil.scopesToResource(request.getScopes());
        try {
            // Reject scope strings that are unsafe to place on a command line.
            ScopeUtil.validateScope(scopes);
        } catch (IllegalArgumentException ex) {
            return Mono.error(LOGGER.logExceptionAsError(ex));
        }
        azCommand.append(scopes);
        String tenant = IdentityUtil.resolveTenantId(null, request, options);
        if (!CoreUtils.isNullOrEmpty(tenant)) {
            // NOTE(review): there is no space before "--tenant", so as written
            // this concatenates onto the resource value ("<scope>--tenant x").
            // Looks like a dropped leading space — confirm against upstream.
            azCommand.append("--tenant ").append(tenant);
        }
        AccessToken token;
        try {
            // Pick the platform shell: cmd.exe /c on Windows, /bin/sh -c elsewhere.
            String starter;
            String switcher;
            if (isWindowsPlatform()) {
                starter = WINDOWS_STARTER;
                switcher = WINDOWS_SWITCHER;
            } else {
                starter = LINUX_MAC_STARTER;
                switcher = LINUX_MAC_SWITCHER;
            }
            ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString());
            // Run from a known-safe directory so a malicious az binary in the
            // current directory cannot be picked up.
            String workingDirectory = getSafeWorkingDirectory();
            if (workingDirectory != null) {
                builder.directory(new File(workingDirectory));
            } else {
                throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be"
                    + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting "
                    + " guidelines here at https:
                    // NOTE(review): string literal truncated in SOURCE (URL cut off).
            }
            builder.redirectErrorStream(true);
            Process process = builder.start();
            StringBuilder output = new StringBuilder();
            try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(),
                StandardCharsets.UTF_8.name()))) {
                String line;
                while (true) {
                    line = reader.readLine();
                    if (line == null) {
                        break;
                    }
                    // "'az' is not recognized" / "az: ... not found" means the
                    // Azure CLI itself is not installed.
                    if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE)
                        || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) {
                        throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                            new CredentialUnavailableException(
                                "AzureCliCredential authentication unavailable. Azure CLI not installed."
                                + "To mitigate this issue, please refer to the troubleshooting guidelines here at "
                                + "https:
                                // NOTE(review): string literal truncated in SOURCE (URL cut off).
                    }
                    output.append(line);
                }
            }
            String processOutput = output.toString();
            // NOTE(review): the boolean result of waitFor(10s) is ignored, and
            // exitValue() throws IllegalThreadStateException if the process is
            // still running after the timeout — confirm this is intended.
            process.waitFor(10, TimeUnit.SECONDS);
            if (process.exitValue() != 0) {
                if (processOutput.length() > 0) {
                    String redactedOutput = redactInfo(processOutput);
                    // "az login" / "az account set" in the output means no
                    // account is signed in rather than a hard failure.
                    if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) {
                        throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                            new CredentialUnavailableException(
                                "AzureCliCredential authentication unavailable."
                                + " Please run 'az login' to set up account. To further mitigate this"
                                + " issue, please refer to the troubleshooting guidelines here at "
                                + "https:
                                // NOTE(review): string literal truncated in SOURCE (URL cut off).
                    }
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
                } else {
                    throw LOGGER.logExceptionAsError(
                        new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
                }
            }
            LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the"
                + " response into an Access Token.");
            Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
                SerializerEncoding.JSON);
            String accessToken = objectMap.get("accessToken");
            String time = objectMap.get("expiresOn");
            // "expiresOn" appears to be "yyyy-MM-dd HH:mm:ss.ffffff" in local
            // time: drop the fractional part, join date and time with 'T',
            // parse as local date-time, then convert to UTC. Throws if the
            // value contains no '.' — presumably az always emits one; TODO confirm.
            String timeToSecond = time.substring(0, time.indexOf("."));
            String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
            OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
                .atZone(ZoneId.systemDefault())
                .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
            token = new AccessToken(accessToken, expiresOn);
        } catch (IOException | InterruptedException e) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(e));
        } catch (RuntimeException e) {
            // Continues on the next source line: CredentialUnavailableException
            // is logged as "credential unavailable", anything else as an error.
            return Mono.error(e instanceof CredentialUnavailableException ?
LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". 
To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new 
CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
     * NOTE(review): collapsed extraction — these first lines are the tail of a
     * Javadoc block opened on the previous source line.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) {
        // Client-credentials flow: resolve the (possibly per-request) tenant,
        // then acquire a token through the cached ConfidentialClientApplication.
        return confidentialClientApplicationAccessor.getValue()
            .flatMap(confidentialClient -> Mono.fromFuture(() -> {
                ClientCredentialParameters.ClientCredentialParametersBuilder builder =
                    ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                        .tenant(IdentityUtil
                            .resolveTenantId(tenantId, request, options));
                return confidentialClient.acquireToken(builder.build());
            }
            )).map(MsalToken::new);
    }

    /**
     * Builds the HTTP pipeline MSAL uses for the given client: the standard
     * before/after-retry policies around a RetryPolicy, plus HTTP logging with
     * default (disabled) log options.
     */
    private HttpPipeline setupPipeline(HttpClient httpClient) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        HttpLogOptions httpLogOptions = new HttpLogOptions();
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(new RetryPolicy());
        HttpPolicyProviders.addAfterRetryPolicies(policies);
        policies.add(new HttpLoggingPolicy(httpLogOptions));
        return new HttpPipelineBuilder().httpClient(httpClient)
            .policies(policies.toArray(new HttpPipelinePolicy[0])).build();
    }

    /**
     * Asynchronously acquire a token from Active Directory with a username and a password.
* * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
     * NOTE(review): collapsed extraction — these first lines are the tail of a
     * Javadoc block opened on the previous source line.
     *
     * @param request the details of the token request
     * @param account the account used to log in to acquire the last token
     * @return a Publisher that emits an AccessToken
     */
    @SuppressWarnings("deprecation")
    public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
        // Two-stage silent auth: first try the MSAL cache as-is; if the token
        // it yields is within REFRESH_OFFSET (5 min) of expiry, retry with
        // forceRefresh(true) to proactively obtain a fresh token.
        return publicClientApplicationAccessor.getValue()
            .flatMap(pc -> Mono.fromFuture(() -> {
                SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
                    new HashSet<>(request.getScopes()));
                if (request.getClaims() != null) {
                    // Claims on the request invalidate cached tokens, so a
                    // refresh is forced even on the first attempt.
                    ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
                    parametersBuilder.claims(customClaimRequest);
                    parametersBuilder.forceRefresh(true);
                }
                if (account != null) {
                    parametersBuilder = parametersBuilder.account(account);
                }
                parametersBuilder.tenant(
                    IdentityUtil.resolveTenantId(tenantId, request, options));
                try {
                    return pc.acquireTokenSilently(parametersBuilder.build());
                } catch (MalformedURLException e) {
                    return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
                }
            }).map(MsalToken::new)
                // Drop tokens that are about to expire; the force-refresh
                // fallback below then takes over via switchIfEmpty.
                .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))
                .switchIfEmpty(Mono.fromFuture(() -> {
                    // Same parameter assembly as above, but always forceRefresh.
                    SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
                        new HashSet<>(request.getScopes())).forceRefresh(true);
                    if (request.getClaims() != null) {
                        ClaimsRequest customClaimRequest = CustomClaimRequest
                            .formatAsClaimsRequest(request.getClaims());
                        forceParametersBuilder.claims(customClaimRequest);
                    }
                    if (account != null) {
                        forceParametersBuilder = forceParametersBuilder.account(account);
                    }
                    forceParametersBuilder.tenant(
                        IdentityUtil.resolveTenantId(tenantId, request, options));
                    try {
                        return pc.acquireTokenSilently(forceParametersBuilder.build());
                    } catch (MalformedURLException e) {
                        return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
                    }
                }).map(MsalToken::new)));
    }

    /**
     *
     Asynchronously acquire a token from the currently logged in client.
     (NOTE(review): collapsed extraction — this text continues a Javadoc block
     opened on the previous source line.)
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    @SuppressWarnings("deprecation")
    public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) {
        // Cache-only silent flow for the confidential client; completes empty
        // when the cached token is within REFRESH_OFFSET (5 min) of expiry so
        // the caller can fall back to a fresh acquisition.
        return confidentialClientApplicationAccessor.getValue()
            .flatMap(confidentialClient -> Mono.fromFuture(() -> {
                SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
                    new HashSet<>(request.getScopes()))
                    .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
                try {
                    return confidentialClient.acquireTokenSilently(parametersBuilder.build());
                } catch (MalformedURLException e) {
                    return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
                }
            }).map(ar -> (AccessToken) new MsalToken(ar))
                .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))));
    }

    /**
     * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
     * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
     * different device.
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. 
" + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. 
Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc 
Managed Identity Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) 
.useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = 
new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } }
If certificatePath is not set and certificate is null, the method would return null either way, so simply return certificate unconditionally in the fallback branch.
/**
 * Opens a stream over the client certificate configured on this client.
 *
 * <p>When a file path was supplied, a buffered stream over that file is returned;
 * otherwise the caller-provided in-memory certificate stream is returned as-is
 * (which may be {@code null} when no certificate was configured at all).</p>
 *
 * @return an {@link InputStream} over the certificate bytes, or {@code null} if none was configured
 * @throws IOException if the certificate file cannot be opened
 */
private InputStream getCertificateInputStream() throws IOException {
    if (certificatePath == null) {
        // No path configured: fall back to the raw stream handed in at construction time.
        return certificate;
    }
    // Buffer the file stream so downstream parsers can read it efficiently.
    return new BufferedInputStream(new FileInputStream(certificatePath));
}
return certificate;
// Returns a stream over the configured certificate: a buffered file stream when certificatePath
// is set, otherwise the caller-supplied InputStream field (which may be null when no certificate
// was configured — callers are expected to handle that case).
private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final 
String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? 
new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? 
tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." )); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); 
            // Optionally attach a persistent (on-disk) token cache; failure to set it up is
            // surfaced immediately rather than silently falling back to in-memory caching.
            PersistentTokenCacheImpl tokenCache = null;
            if (tokenCachePersistenceOptions != null) {
                try {
                    tokenCache = new PersistentTokenCacheImpl()
                        .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed())
                        .setName(tokenCachePersistenceOptions.getName());
                    builder.setTokenCacheAccessAspect(tokenCache);
                } catch (Throwable t) {
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
                        "Shared token cache is unavailable in this environment.", null, t));
                }
            }
            PublicClientApplication publicClientApplication = builder.build();
            // Register the cache first (if present) so reads/writes hit the persisted store.
            return tokenCache != null
                ? tokenCache.registerCache()
                    .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication);
        });
    }

    /**
     * Asynchronously acquires a token using credentials cached by the Azure Tools for
     * IntelliJ plugin. Supports the plugin's Service Principal ("SP") and Device Code
     * ("DC") sign-in records; other schemes are reported as unavailable.
     */
    public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) {
        try {
            IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath());
            IntelliJAuthMethodDetails authDetails;
            try {
                authDetails = cacheAccessor.getAuthDetailsIfAvailable();
            } catch (CredentialUnavailableException e) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJ Authentication not available.", e)));
            }
            if (authDetails == null) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJ Authentication not available."
                        + " Please log in with Azure Tools for IntelliJ plugin in the IDE.")));
            }
            String authType = authDetails.getAuthMethod();
            if ("SP".equalsIgnoreCase(authType)) {
                // Service-principal record: rebuild a confidential client from the cached
                // client id / secret / tenant and do a client-credentials acquisition.
                Map<String, String> spDetails = cacheAccessor
                    .getIntellijServicePrincipalDetails(authDetails.getCredFilePath());
                String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant");
                try {
                    ConfidentialClientApplication.Builder applicationBuilder =
                        ConfidentialClientApplication.builder(spDetails.get("client"),
                            ClientCredentialFactory.createFromSecret(spDetails.get("key")))
                            .authority(authorityUrl).validateAuthority(options.getAuthorityValidation());
                    if (httpPipelineAdapter != null) {
                        applicationBuilder.httpClient(httpPipelineAdapter);
                    } else if (options.getProxyOptions() != null) {
                        applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
                    }
                    if (options.getExecutorService() != null) {
                        applicationBuilder.executorService(options.getExecutorService());
                    }
                    ConfidentialClientApplication application = applicationBuilder.build();
                    return Mono.fromFuture(application.acquireToken(
                        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                            .build())).map(MsalToken::new);
                } catch (MalformedURLException e) {
                    return Mono.error(e);
                }
            } else if ("DC".equalsIgnoreCase(authType)) {
                // Device-code record: redeem the cached refresh token for a new access token.
                LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools"
                    + " for IntelliJ Plugin.");
                if (isADFSTenant()) {
                    LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and"
                        + " the ADFS tenants are not supported via IntelliJ Authentication currently.");
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException("IntelliJCredential "
                            + "authentication unavailable. ADFS tenant/authorities are not supported.")));
                }
                try {
                    JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials();
                    String refreshToken = intelliJCredentials.get("refreshToken").textValue();
                    RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder =
                        RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken);
                    if (request.getClaims() != null) {
                        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
                        refreshTokenParametersBuilder.claims(customClaimRequest);
                    }
                    return publicClientApplicationAccessor.getValue()
                        .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build()))
                            .map(MsalToken::new));
                } catch (CredentialUnavailableException e) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e));
                }
            } else {
                // Unknown / unsupported sign-in scheme recorded by the plugin.
                LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication"
                    + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one"
                    + " of those schemes from Azure Tools for IntelliJ plugin.");
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJ Authentication not available."
                        + " Please login with Azure Tools for IntelliJ plugin in the IDE.")));
            }
        } catch (IOException e) {
            return Mono.error(e);
        }
    }

    /**
     * Asynchronously acquire a token from Active Directory with Azure CLI.
     *
     * Shells out to {@code az account get-access-token} and parses its JSON output.
     * NOTE(review): several string literals in this method appear truncated in this source
     * extract (URLs after "https:" are missing, leaving unterminated literals) — verify
     * against the upstream file before building.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
        StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
        String scopes = ScopeUtil.scopesToResource(request.getScopes());
        try {
            // Reject scope strings that could inject extra shell arguments.
            ScopeUtil.validateScope(scopes);
        } catch (IllegalArgumentException ex) {
            return Mono.error(LOGGER.logExceptionAsError(ex));
        }
        azCommand.append(scopes);
        String tenant = IdentityUtil.resolveTenantId(null, request, options);
        if (!CoreUtils.isNullOrEmpty(tenant)) {
            // NOTE(review): no space before "--tenant" here — looks like it relies on the
            // resource string's trailing space; confirm the final command is well-formed.
            azCommand.append("--tenant ").append(tenant);
        }
        AccessToken token;
        try {
            // Pick the platform shell (cmd /c vs sh -c style) to launch the CLI.
            String starter;
            String switcher;
            if (isWindowsPlatform()) {
                starter = WINDOWS_STARTER;
                switcher = WINDOWS_SWITCHER;
            } else {
                starter = LINUX_MAC_STARTER;
                switcher = LINUX_MAC_SWITCHER;
            }
            ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString());
            // Run from a known-safe directory so a malicious az executable in the CWD
            // cannot be picked up.
            String workingDirectory = getSafeWorkingDirectory();
            if (workingDirectory != null) {
                builder.directory(new File(workingDirectory));
            } else {
                throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be"
                    + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting "
                    + " guidelines here at https: }
            builder.redirectErrorStream(true);
            Process process = builder.start();
            StringBuilder output = new StringBuilder();
            try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(),
                StandardCharsets.UTF_8.name()))) {
                String line;
                while (true) {
                    line = reader.readLine();
                    if (line == null) {
                        break;
                    }
                    // "command not found"-style output means the CLI is not installed.
                    if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE)
                        || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) {
                        throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                            new CredentialUnavailableException(
                                "AzureCliCredential authentication unavailable. Azure CLI not installed."
                                + "To mitigate this issue, please refer to the troubleshooting guidelines here at "
                                + "https: }
                    output.append(line);
                }
            }
            String processOutput = output.toString();
            process.waitFor(10, TimeUnit.SECONDS);
            if (process.exitValue() != 0) {
                if (processOutput.length() > 0) {
                    // Redact secrets before logging / surfacing CLI output.
                    String redactedOutput = redactInfo(processOutput);
                    if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) {
                        throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                            new CredentialUnavailableException(
                                "AzureCliCredential authentication unavailable."
                                + " Please run 'az login' to set up account. To further mitigate this"
                                + " issue, please refer to the troubleshooting guidelines here at "
                                + "https: }
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
                } else {
                    throw LOGGER.logExceptionAsError(
                        new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
                }
            }
            LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the"
                + " response into an Access Token.");
            Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
                SerializerEncoding.JSON);
            String accessToken = objectMap.get("accessToken");
            // CLI emits a local-time "expiresOn" like "2021-01-01 12:00:00.000000";
            // strip fractional seconds, convert to ISO form, then normalize to UTC.
            String time = objectMap.get("expiresOn");
            String timeToSecond = time.substring(0, time.indexOf("."));
            String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
            OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
                .atZone(ZoneId.systemDefault())
                .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
            token = new AccessToken(accessToken, expiresOn);
        } catch (IOException | InterruptedException e) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(e));
        } catch (RuntimeException e) {
            return Mono.error(e instanceof CredentialUnavailableException ?
                LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
                    : LOGGER.logExceptionAsError(e));
        }
        return Mono.just(token);
    }

    /**
     * Asynchronously acquire a token from Active Directory with Azure Power Shell.
     *
     * Tries the default PowerShell executable first and, on Windows only, falls back to the
     * legacy executable; unavailability errors from each attempt are aggregated into one
     * CredentialUnavailableException if both fail.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) {
        List<CredentialUnavailableException> exceptions = new ArrayList<>(2);
        PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows()
            ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE);
        // Legacy Windows PowerShell is only a fallback on Windows.
        PowershellManager legacyPowerShellManager = Platform.isWindows()
            ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null;
        List<PowershellManager> powershellManagers = new ArrayList<>(2);
        powershellManagers.add(defaultPowerShellManager);
        if (legacyPowerShellManager != null) {
            powershellManagers.add(legacyPowerShellManager);
        }
        // Concurrency 1: try managers strictly one at a time; first success wins.
        return Flux.fromIterable(powershellManagers)
            .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager)
                .onErrorResume(t -> {
                    // NOTE(review): comparing by simple class name rather than instanceof —
                    // presumably to match subclasses/relocated types; confirm intent.
                    if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) {
                        return Mono.error(new ClientAuthenticationException(
                            "Azure Powershell authentication failed. Error Details: " + t.getMessage()
                            + ". To mitigate this issue, please refer to the troubleshooting guidelines here at "
                            + "https: null, t));
                    }
                    exceptions.add((CredentialUnavailableException) t);
                    return Mono.empty();
                }), 1)
            .next()
            .switchIfEmpty(Mono.defer(() -> {
                // All managers failed: fold the collected errors (newest first) into one
                // chained CredentialUnavailableException message.
                CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
                for (int z = exceptions.size() - 2; z >= 0; z--) {
                    CredentialUnavailableException current = exceptions.get(z);
                    last = new CredentialUnavailableException("Azure PowerShell authentication failed using default"
                        + "powershell(pwsh) with following error: " + current.getMessage()
                        + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)"
                        + " with following error: " + last.getMessage(), last.getCause());
                }
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last)));
            }));
    }

    /**
     * Asynchronously acquire a token via the On-Behalf-Of (OBO) flow, exchanging the
     * configured user assertion for a downstream token with the confidential client.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) {
        return confidentialClientApplicationAccessor.getValue()
            .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters
                    .builder(new HashSet<>(request.getScopes()), options.getUserAssertion())
                    .tenant(IdentityUtil.resolveTenantId(tenantId, request, options))
                    .build()))
                .map(MsalToken::new));
    }

    // Runs Get-AzAccessToken inside the given PowerShell session and parses the JSON result.
    // Requires the Az.Accounts module (>= 2.2.0) and a logged-in session; each failure mode
    // is surfaced as a CredentialUnavailableException. Always closes the session.
    private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request,
                                                           PowershellManager powershellManager) {
        return powershellManager.initSession()
            .flatMap(manager -> {
                String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru";
                return manager.runCommand(azAccountsCommand)
                    .flatMap(output -> {
                        if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded "
                            + "because no valid module file")) {
                            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new
                                CredentialUnavailableException(
                                "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to"
                                    + " use Azure PowerShell Credential.")));
                        }
                        LOGGER.verbose("Az.accounts module was found installed.");
                        StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl ");
                        accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes()));
                        accessTokenCommand.append(" | ConvertTo-Json");
                        // NOTE(review): 'command' is never used below (runCommand re-calls
                        // accessTokenCommand.toString()); candidate for cleanup.
                        String command = accessTokenCommand.toString();
                        LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure "
                            + "Powershell to retrieve the Access Token.", accessTokenCommand);
                        return manager.runCommand(accessTokenCommand.toString())
                            .flatMap(out -> {
                                if (out.contains("Run Connect-AzAccount to login")) {
                                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                                        new CredentialUnavailableException(
                                        "Run Connect-AzAccount to login to Azure account in PowerShell.")));
                                }
                                try {
                                    LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the "
                                        + "received response from Azure Powershell.");
                                    Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class,
                                        SerializerEncoding.JSON);
                                    String accessToken = objectMap.get("Token");
                                    // "ExpiresOn" is an ISO offset date-time; normalize to UTC.
                                    String time = objectMap.get("ExpiresOn");
                                    OffsetDateTime expiresOn = OffsetDateTime.parse(time)
                                        .withOffsetSameInstant(ZoneOffset.UTC);
                                    return Mono.just(new AccessToken(accessToken, expiresOn));
                                } catch (IOException e) {
                                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                                        new CredentialUnavailableException(
                                        "Encountered error when deserializing response from Azure Power Shell.", e)));
                                }
                            });
                    });
            }).doFinally(ignored -> powershellManager.close());
    }

    /**
     * Asynchronously acquire a token from Active Directory with a client secret.
     *
     * Performs a client-credentials acquisition with the cached confidential client,
     * resolving the effective tenant from the request/options.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) {
        return confidentialClientApplicationAccessor.getValue()
            .flatMap(confidentialClient -> Mono.fromFuture(() -> {
                ClientCredentialParameters.ClientCredentialParametersBuilder builder =
                    ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                        .tenant(IdentityUtil
                            .resolveTenantId(tenantId, request, options));
                return confidentialClient.acquireToken(builder.build());
            }
        )).map(MsalToken::new);
    }

    // Builds a minimal azure-core HTTP pipeline (retry + logging around the standard
    // before/after-retry policies) over the supplied HttpClient.
    private HttpPipeline setupPipeline(HttpClient httpClient) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        HttpLogOptions httpLogOptions = new HttpLogOptions();
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(new RetryPolicy());
        HttpPolicyProviders.addAfterRetryPolicies(policies);
        policies.add(new HttpLoggingPolicy(httpLogOptions));
        return new HttpPipelineBuilder().httpClient(httpClient)
            .policies(policies.toArray(new HttpPipelinePolicy[0])).build();
    }

    /**
     * Asynchronously acquire a token from Active Directory with a username and a password.
     *
     * Resource-owner-password-credentials (ROPC) flow via the public client, with optional
     * claims-challenge pass-through.
     * NOTE(review): the failure-message string literal appears truncated in this source
     * extract (URL after "https:" missing) — verify against the upstream file.
     *
     * @param request the details of the token request
     * @param username the username of the user
     * @param password the password of the user
     * @return a Publisher that emits an AccessToken
     */
    public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
                                                            String username, String password) {
        return publicClientApplicationAccessor.getValue()
            .flatMap(pc -> Mono.fromFuture(() -> {
                UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder =
                    UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()),
                        username, password.toCharArray());
                if (request.getClaims() != null) {
                    ClaimsRequest customClaimRequest = CustomClaimRequest
                        .formatAsClaimsRequest(request.getClaims());
                    userNamePasswordParametersBuilder.claims(customClaimRequest);
                }
                userNamePasswordParametersBuilder.tenant(
                    IdentityUtil.resolveTenantId(tenantId, request, options));
                return pc.acquireToken(userNamePasswordParametersBuilder.build());
            }
        )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and "
            + "password. To mitigate this issue, please refer to the troubleshooting guidelines "
            + "here at https: null, t)).map(MsalToken::new);
    }

    /**
     * Asynchronously acquire a token from the currently logged in client.
     *
     * Silent acquisition against the public client's token cache. A cached token that is
     * within REFRESH_OFFSET of expiry is discarded and a forced-refresh silent request is
     * issued instead.
     *
     * @param request the details of the token request
     * @param account the account used to log in to acquire the last token
     * @return a Publisher that emits an AccessToken
     */
    @SuppressWarnings("deprecation")
    public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
        return publicClientApplicationAccessor.getValue()
            .flatMap(pc -> Mono.fromFuture(() -> {
                SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
                    new HashSet<>(request.getScopes()));
                if (request.getClaims() != null) {
                    ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
                    parametersBuilder.claims(customClaimRequest);
                    // A claims challenge invalidates cached tokens, so force a refresh.
                    parametersBuilder.forceRefresh(true);
                }
                if (account != null) {
                    parametersBuilder = parametersBuilder.account(account);
                }
                parametersBuilder.tenant(
                    IdentityUtil.resolveTenantId(tenantId, request, options));
                try {
                    return pc.acquireTokenSilently(parametersBuilder.build());
                } catch (MalformedURLException e) {
                    return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
                }
            }).map(MsalToken::new)
                // Keep the cached token only if it is not about to expire...
                .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))
                // ...otherwise repeat the silent call with forceRefresh(true).
                .switchIfEmpty(Mono.fromFuture(() -> {
                    SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
                        new HashSet<>(request.getScopes())).forceRefresh(true);
                    if (request.getClaims() != null) {
                        ClaimsRequest customClaimRequest = CustomClaimRequest
                            .formatAsClaimsRequest(request.getClaims());
                        forceParametersBuilder.claims(customClaimRequest);
                    }
                    if (account != null) {
                        forceParametersBuilder = forceParametersBuilder.account(account);
                    }
                    forceParametersBuilder.tenant(
                        IdentityUtil.resolveTenantId(tenantId, request, options));
                    try {
                        return pc.acquireTokenSilently(forceParametersBuilder.build());
                    } catch (MalformedURLException e) {
                        return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
                    }
                }).map(MsalToken::new)));
    }

    /**
     *
     * Asynchronously acquire a token from the currently logged in client.
     *
     * Silent acquisition against the confidential client's token cache; emits empty if the
     * cached token is within REFRESH_OFFSET of expiry (no forced refresh here — callers
     * fall back to a fresh acquisition).
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    @SuppressWarnings("deprecation")
    public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) {
        return confidentialClientApplicationAccessor.getValue()
            .flatMap(confidentialClient -> Mono.fromFuture(() -> {
                SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
                        new HashSet<>(request.getScopes()))
                    .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
                try {
                    return confidentialClient.acquireTokenSilently(parametersBuilder.build());
                } catch (MalformedURLException e) {
                    return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
                }
            }).map(ar -> (AccessToken) new MsalToken(ar))
                .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))));
    }

    /**
     * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
     * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
     * different device.
     *
     * @param request the details of the token request
     * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
     * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
     * code expires
     */
    public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
                                                      Consumer<DeviceCodeInfo> deviceCodeConsumer) {
        return publicClientApplicationAccessor.getValue().flatMap(pc ->
            Mono.fromFuture(() -> {
                // Adapt MSAL's device-code callback to the public DeviceCodeInfo shape.
                DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder =
                    DeviceCodeFlowParameters.builder(
                        new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(
                            new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(),
                                OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message())))
                        .tenant(IdentityUtil
                            .resolveTenantId(tenantId, request, options));
                if (request.getClaims() != null) {
                    ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
                    parametersBuilder.claims(customClaimRequest);
                }
                return pc.acquireToken(parametersBuilder.build());
            }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t))
                .map(MsalToken::new));
    }

    /**
     * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token.
     *
     * Redeems the refresh token stored by the VS Code Azure Account extension for the given
     * cloud. Not supported for ADFS tenants.
     * NOTE(review): error-message literals below appear truncated in this source extract
     * (URLs after "https:" missing) — verify against the upstream file.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken.
     */
    public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) {
        if (isADFSTenant()) {
            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                new CredentialUnavailableException("VsCodeCredential "
                    + "authentication unavailable. ADFS tenant/authorities are not supported. "
                    + "To mitigate this issue, please refer to the troubleshooting guidelines here at "
                    + "https: }
        VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor();
        String credential = null;
        try {
            // Pulls the cached refresh token for the "VS Code Azure" entry / given cloud.
            credential = accessor.getCredentials("VS Code Azure", cloud);
        } catch (CredentialUnavailableException e) {
            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e));
        }
        RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters
            .builder(new HashSet<>(request.getScopes()), credential);
        if (request.getClaims() != null) {
            ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
            parametersBuilder.claims(customClaimRequest);
        }
        return publicClientApplicationAccessor.getValue()
            .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build()))
                .onErrorResume(t -> {
                    // An interaction-required failure means the cached refresh token cannot be
                    // redeemed silently — report the credential as unavailable.
                    if (t instanceof MsalInteractionRequiredException) {
                        return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                            new CredentialUnavailableException("Failed to acquire token with"
                                + " VS code credential."
                                + " To mitigate this issue, please refer to the troubleshooting guidelines here at "
                                + "https: }
                    return Mono.error(new ClientAuthenticationException("Failed to acquire token with"
                        + " VS code credential", null, t));
                })
                .map(MsalToken::new));
    }

    /**
     * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
     *
     * Uses the confidential client when a client secret is configured, otherwise the public
     * client, to redeem the authorization code.
     *
     * @param request the details of the token request
     * @param authorizationCode the oauth2 authorization code
     * @param redirectUrl the redirectUrl where the authorization code is sent to
     * @return a Publisher that emits an AccessToken
     */
    public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
                                                             URI redirectUrl) {
        AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder =
            AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
                .scopes(new HashSet<>(request.getScopes()))
                .tenant(IdentityUtil
                    .resolveTenantId(tenantId, request, options));
        if (request.getClaims() != null) {
            ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
            parametersBuilder.claims(customClaimRequest);
        }
        Mono<IAuthenticationResult> acquireToken;
        if (clientSecret != null) {
            acquireToken = confidentialClientApplicationAccessor.getValue()
                .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build())));
        } else {
            acquireToken = publicClientApplicationAccessor.getValue()
                .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build())));
        }
        return acquireToken.onErrorMap(t -> new ClientAuthenticationException(
            "Failed to acquire token with authorization code", null, t)).map(MsalToken::new);
    }

    /**
     * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
     * credential will run a minimal local HttpServer at the given port, so {@code http://localhost:{port}} must be
     * listed as a valid reply URL for the application.
     *
     * @param request the details of the token request
     * @param port the port on which the HTTP server is listening
     * @param redirectUrl the redirect URL to listen on and receive security code
     * @param loginHint the username suggestion to pre-fill the login page's username/email address field
     * @return a Publisher that emits an AccessToken
     */
    public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
                                                              String redirectUrl, String loginHint) {
        URI redirectUri;
        String redirect;
        // Redirect precedence: explicit port > explicit redirect URL > bare localhost.
        if (port != null) {
            redirect = HTTP_LOCALHOST + ":" + port;
        } else if (redirectUrl != null) {
            redirect = redirectUrl;
        } else {
            redirect = HTTP_LOCALHOST;
        }
        try {
            redirectUri = new URI(redirect);
        } catch (URISyntaxException e) {
            return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e)));
        }
        InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
            InteractiveRequestParameters.builder(redirectUri)
                .scopes(new HashSet<>(request.getScopes()))
                .prompt(Prompt.SELECT_ACCOUNT)
                .tenant(IdentityUtil
                    .resolveTenantId(tenantId, request, options));
        if (request.getClaims() != null) {
            ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
            builder.claims(customClaimRequest);
        }
        if (loginHint != null) {
            builder.loginHint(loginHint);
        }
        Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue()
            .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build())));
        return acquireToken.onErrorMap(t -> new ClientAuthenticationException(
            "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new);
    }

    /**
     * Gets token from shared token cache
     *
     * Selects the single cached account matching {@code username} (or the sole account when
     * username is null), de-duplicated by home account id, then performs a silent acquisition.
     * Zero or ambiguous matches are reported as errors.
     */
    public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) {
        return publicClientApplicationAccessor.getValue()
            .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts())
                .onErrorMap(t -> new CredentialUnavailableException(
                    "Cannot get accounts from token cache. Error: " + t.getMessage(), t))
                .flatMap(set -> {
                    IAccount requestedAccount;
                    Map<String, IAccount> accounts = new HashMap<>(); // home account id -> account
                    if (set.isEmpty()) {
                        return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                            new CredentialUnavailableException("SharedTokenCacheCredential "
                                + "authentication unavailable. No accounts were found in the cache.")));
                    }
                    for (IAccount cached : set) {
                        if (username == null || username.equals(cached.username())) {
                            if (!accounts.containsKey(cached.homeAccountId())) {
                                // Only the first entry per home account id is considered.
                                accounts.put(cached.homeAccountId(), cached);
                            }
                        }
                    }
                    if (accounts.isEmpty()) {
                        // No account matched the requested username.
                        return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                            + "authentication unavailable. No account matching the specified username: %s was "
                            + "found in the cache.", username)));
                    } else if (accounts.size() > 1) {
                        if (username == null) {
                            return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication "
                                + "unavailable. Multiple accounts were found in the cache. Use username and "
                                + "tenant id to disambiguate."));
                        } else {
                            return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                                + "authentication unavailable. Multiple accounts matching the specified username: "
                                + "%s were found in the cache.", username)));
                        }
                    } else {
                        requestedAccount = accounts.values().iterator().next();
                    }
                    return authenticateWithPublicClientCache(request, requestedAccount);
                }));
    }

    /**
     * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint.
     *
     * Azure Arc challenge/response: an initial unauthenticated GET is expected to return
     * 401 with a WWW-Authenticate header naming a local secret file; the file's contents
     * are then sent as a Basic credential on the second request.
     *
     * @param identityEndpoint the Identity endpoint to acquire token from
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint,
                                                                      TokenRequestContext request) {
        return Mono.fromCallable(() -> {
            HttpURLConnection connection = null;
            StringBuilder payload = new StringBuilder();
            payload.append("resource=");
            payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()),
                StandardCharsets.UTF_8.name()));
            payload.append("&api-version=");
            payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name()));
            URL url = new URL(String.format("%s?%s", identityEndpoint, payload));
            String secretKey = null;
            try {
                // First request: expected to FAIL with 401 carrying the secret-file path.
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                connection.setRequestProperty("Metadata", "true");
                connection.connect();
                // NOTE(review): this Scanner's result is discarded — the read only forces the
                // response; the success path here is unexpected. Confirm intent upstream.
                new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A");
            } catch (IOException e) {
                if (connection == null) {
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize "
                        + "Http URL connection to the endpoint.", null, e));
                }
                int status = connection.getResponseCode();
                if (status != 401) {
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401"
                        + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d",
                        status), null, e));
                }
                // The realm value is "Basic realm=<path-to-secret-file>".
                String realm = connection.getHeaderField("WWW-Authenticate");
                if (realm == null) {
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value"
                        + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint",
                        null));
                }
                int separatorIndex = realm.indexOf("=");
                if (separatorIndex == -1) {
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value"
                        + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint",
                        null));
                }
                String secretKeyPath = realm.substring(separatorIndex + 1);
                secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8);
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
            if (secretKey == null) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value"
                    + " in the response from Azure Arc Managed Identity Endpoint", null));
            }
            try {
                // Second request: retry with the secret presented as a Basic credential.
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey));
                connection.setRequestProperty("Metadata", "true");
                connection.connect();
                Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A");
                String result = scanner.hasNext() ? scanner.next() : "";
                return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        });
    }

    /**
     * Asynchronously acquire a token by exchanging a client assertion (federated credential)
     * at the Azure AD token endpoint.
     *
     * Posts a client_credentials grant with a JWT bearer client assertion (from
     * {@code clientAssertionAccessor}) directly to the v2.0 token endpoint.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) {
        return clientAssertionAccessor.getValue()
            .flatMap(assertionToken -> Mono.fromCallable(() -> {
                String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("")
                    + "/" + tenantId + "/oauth2/v2.0/token";
                // Build the x-www-form-urlencoded body for the client-credentials grant.
                StringBuilder urlParametersBuilder = new StringBuilder();
                urlParametersBuilder.append("client_assertion=");
                urlParametersBuilder.append(assertionToken);
                urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type"
                    + ":jwt-bearer");
                urlParametersBuilder.append("&client_id=");
                urlParametersBuilder.append(clientId);
                urlParametersBuilder.append("&grant_type=client_credentials");
                urlParametersBuilder.append("&scope=");
                urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0),
                    StandardCharsets.UTF_8.name()));
                String urlParams = urlParametersBuilder.toString();
                byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8);
                int postDataLength = postData.length;
                HttpURLConnection connection = null;
                URL url = new URL(authorityUrl);
                try {
                    connection = (HttpURLConnection) url.openConnection();
                    connection.setRequestMethod("POST");
                    connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
                    connection.setRequestProperty("Content-Length", Integer.toString(postDataLength));
                    connection.setDoOutput(true);
                    try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) {
                        outputStream.write(postData);
                    }
                    connection.connect();
                    Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                        .useDelimiter("\\A");
                    String result = s.hasNext() ? s.next() : "";
                    return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
                } finally {
                    if (connection != null) {
                        connection.disconnect();
                    }
                }
            }));
    }

    /**
     * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint.
     *
     * Uses HTTPS with the cluster's certificate thumbprint pinned, passing the identity
     * header as the "Secret" request property.
     *
     * @param identityEndpoint the Identity endpoint to acquire token from
     * @param identityHeader the identity header to acquire token with
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint,
                                                                                String identityHeader,
                                                                                String thumbprint,
                                                                                TokenRequestContext request) {
        return Mono.fromCallable(() -> {
            HttpsURLConnection connection = null;
            String endpoint = identityEndpoint;
            String headerValue = identityHeader;
            String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION;
            String resource = ScopeUtil.scopesToResource(request.getScopes());
            StringBuilder payload = new StringBuilder();
            payload.append("resource=");
            payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name()));
            payload.append("&api-version=");
            payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name()));
            if (clientId != null) {
                payload.append("&client_id=");
                payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()));
            }
            if (resourceId != null) {
                payload.append("&mi_res_id=");
                payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name()));
            }
            try {
                URL url = new URL(String.format("%s?%s", endpoint, payload));
                connection = (HttpsURLConnection) url.openConnection();
                // Pin the expected server certificate by thumbprint.
                IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER);
                connection.setRequestMethod("GET");
                if (headerValue != null) {
                    connection.setRequestProperty("Secret", headerValue);
                }
                connection.setRequestProperty("Metadata", "true");
                connection.connect();
                Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A");
                String result = s.hasNext() ? s.next() : "";
                return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        });
    }

    /**
     * Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
     *
     * @param identityEndpoint the Identity endpoint to acquire token from
     * @param identityHeader the identity header to acquire token with
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader,
                                                                   TokenRequestContext request) {
        // NOTE(review): this method continues beyond the visible extract.
        return Mono.fromCallable(() -> {
            String endpoint;
            String headerValue;
            String endpointVersion;
            endpoint = identityEndpoint;
            headerValue = identityHeader;
            endpointVersion = IDENTITY_ENDPOINT_VERSION;
            String resource = ScopeUtil.scopesToResource(request.getScopes());
            HttpURLConnection connection = null;
            StringBuilder payload = new StringBuilder();
            payload.append("resource=");
            payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name()));
            payload.append("&api-version=");
            payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name()));
            if (clientId != null) {
                payload.append("&client_id=");
                payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()));
            }
            if (resourceId != null) {
                payload.append("&mi_res_id=");
                payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name()));
            }
            try {
                URL url = new URL(String.format("%s?%s", endpoint, payload));
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                if (headerValue != null) {
                    // Newer endpoint versions use X-IDENTITY-HEADER; older ones use Secret.
                    if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) {
                        connection.setRequestProperty("X-IDENTITY-HEADER", headerValue);
                    } else {
                        connection.setRequestProperty("Secret", headerValue);
                    }
                }
                connection.setRequestProperty("Metadata", "true");
                connection.connect();
                Scanner s =
new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } }
class IdentityClient {
    private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
    private static final Random RANDOM = new Random();
    // Shell launchers used to run az / PowerShell commands per platform.
    private static final String WINDOWS_STARTER = "cmd.exe";
    private static final String LINUX_MAC_STARTER = "/bin/sh";
    private static final String WINDOWS_SWITCHER = "/c";
    private static final String LINUX_MAC_SWITCHER = "-c";
    // Markers that indicate the Azure CLI is not installed.
    private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized";
    private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found");
    private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot");
    private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe";
    private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe";
    private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh";
    private static final String DEFAULT_MAC_LINUX_PATH = "/bin/";
    private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5);
    // API versions for the various managed identity endpoints.
    private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01";
    private static final String MSI_ENDPOINT_VERSION = "2017-09-01";
    private static final String ADFS_TENANT = "adfs";
    // NOTE(review): the next line was damaged by source extraction — the HTTP_LOCALHOST literal is
    // truncated (likely originally "http://localhost") and two declarations are fused; restore from VCS.
    private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview";
    private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class);
    // Used by redactInfo to mask tokens in subprocess output.
    private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)");
    private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$");
    private final IdentityClientOptions options;
    private final String tenantId;
    private final String clientId;
    private final String resourceId;
    private final String clientSecret;
    private final String clientAssertionFilePath;
    private final InputStream certificate;
    private final String certificatePath;
    private final Supplier<String> clientAssertionSupplier;
    private final String certificatePassword;
    // Mutable: lazily initialized by initializeHttpPipelineAdapter(); may stay null when only a proxy is set.
    private HttpPipelineAdapter httpPipelineAdapter;
    // Lazily-built, thread-safe accessors for the MSAL application objects and the client assertion.
    private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor;
    private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
    private final SynchronizedAccessor<String> clientAssertionAccessor;

    /**
     * Creates an IdentityClient with the given options.
     *
     * @param tenantId the tenant ID of the application; defaults to "organizations" when null.
     * @param clientId the client ID of the application.
     * @param clientSecret the client secret of the application.
     * @param certificatePath the path to the PKCS12 or PEM certificate of the application.
     * @param clientAssertionFilePath the path to the file holding a client assertion token.
     * @param resourceId the resource ID of the application.
     * @param clientAssertionSupplier supplier producing a client assertion token on demand.
     * @param certificate the PKCS12 or PEM certificate of the application.
     * @param certificatePassword the password protecting the PFX certificate.
     * @param isSharedTokenCacheCredential Indicate whether the credential is
     * {@link com.azure.identity.SharedTokenCacheCredential} or not.
     * @param clientAssertionTimeout the timeout to use for the client assertion; defaults to 5 minutes.
     * @param options the options configuring the client; defaults to a fresh instance when null.
     */
    IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath,
        String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
        InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
        Duration clientAssertionTimeout, IdentityClientOptions options) {
        if (tenantId == null) {
            tenantId = "organizations";
        }
        if (options == null) {
            options = new IdentityClientOptions();
        }
        this.tenantId = tenantId;
        this.clientId = clientId;
        this.resourceId = resourceId;
        this.clientSecret = clientSecret;
        this.clientAssertionFilePath = clientAssertionFilePath;
        this.certificatePath = certificatePath;
        this.certificate = certificate;
        this.certificatePassword = certificatePassword;
        this.clientAssertionSupplier = clientAssertionSupplier;
        this.options = options;
        this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() ->
            getPublicClientApplication(isSharedTokenCacheCredential));
        this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() ->
            getConfidentialClientApplication());
        this.clientAssertionAccessor = clientAssertionTimeout == null
            ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
            : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
    }

    /**
     * Builds the MSAL ConfidentialClientApplication from whichever credential material is configured,
     * preferring secret, then certificate (PEM or password-protected PFX), then client assertion supplier.
     */
    private Mono<ConfidentialClientApplication> getConfidentialClientApplication() {
        return Mono.defer(() -> {
            if (clientId == null) {
                return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException(
                    "A non-null value for client ID must be provided for user authentication.")));
            }
            String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("")
                + "/" + tenantId;
            IClientCredential credential;
            if (clientSecret != null) {
                credential = ClientCredentialFactory.createFromSecret(clientSecret);
            } else if (certificate != null || certificatePath != null) {
                try {
                    // No password => PEM: extract key + cert chain; password => PFX stream.
                    if (certificatePassword == null) {
                        byte[] pemCertificateBytes = getCertificateBytes();
                        List<X509Certificate> x509CertificateList =
                            CertificateUtil.publicKeyFromPem(pemCertificateBytes);
                        PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes);
                        if (x509CertificateList.size() == 1) {
                            credential = ClientCredentialFactory.createFromCertificate(
                                privateKey, x509CertificateList.get(0));
                        } else {
                            credential = ClientCredentialFactory.createFromCertificateChain(
                                privateKey, x509CertificateList);
                        }
                    } else {
                        try (InputStream pfxCertificateStream = getCertificateInputStream()) {
                            credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream,
                                certificatePassword);
                        }
                    }
                } catch (IOException | GeneralSecurityException e) {
                    return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(
                        "Failed to parse the certificate for the credential: " + e.getMessage(), e)));
                }
            } else if (clientAssertionSupplier != null) {
                credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get());
            } else {
                // NOTE(review): the line below was damaged by source extraction — the troubleshooting URL
                // literal is truncated and the Mono.error closers are missing; restore from VCS.
                return Mono.error(LOGGER.logExceptionAsError(
                    new IllegalArgumentException("Must provide client secret or client certificate path." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: }
            ConfidentialClientApplication.Builder applicationBuilder =
                ConfidentialClientApplication.builder(clientId, credential);
            try {
                applicationBuilder = applicationBuilder.authority(authorityUrl)
                    .validateAuthority(options.getAuthorityValidation());
            } catch (MalformedURLException e) {
                return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e)));
            }
            applicationBuilder.sendX5c(options.isIncludeX5c());
            initializeHttpPipelineAdapter();
            if (httpPipelineAdapter != null) {
                applicationBuilder.httpClient(httpPipelineAdapter);
            } else {
                applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            }
            if (options.getExecutorService() != null) {
                applicationBuilder.executorService(options.getExecutorService());
            }
            TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions();
            PersistentTokenCacheImpl tokenCache = null;
            if (tokenCachePersistenceOptions != null) {
                try {
                    tokenCache = new PersistentTokenCacheImpl()
                        .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed())
                        .setName(tokenCachePersistenceOptions.getName());
                    applicationBuilder.setTokenCacheAccessAspect(tokenCache);
                } catch (Throwable t) {
                    return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException(
                        "Shared token cache is unavailable in this environment.", null, t)));
                }
            }
            if (options.getRegionalAuthority() != null) {
                if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) {
                    applicationBuilder.autoDetectRegion(true);
                } else {
                    applicationBuilder.azureRegion(options.getRegionalAuthority().toString());
                }
            }
            ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build();
            // Register the persistent cache (async) before handing out the application, when configured.
            return tokenCache != null
                ? tokenCache.registerCache()
                    .map(ignored -> confidentialClientApplication)
                : Mono.just(confidentialClientApplication);
        });
    }

    /** Reads the client assertion token from the configured file path; errors when no path is set. */
    private Mono<String> parseClientAssertion() {
        return Mono.fromCallable(() -> {
            if (clientAssertionFilePath != null) {
                byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath));
                return new String(encoded, StandardCharsets.UTF_8);
            } else {
                throw LOGGER.logExceptionAsError(new IllegalStateException(
                    "Client Assertion File Path is not provided."
                        + " It should be provided to authenticate with client assertion."
                ));
            }
        });
    }

    /**
     * Builds the MSAL PublicClientApplication used for user flows, wiring HTTP transport, executor,
     * CP1 client capability (unless disabled), and the optional persistent token cache.
     */
    private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) {
        return Mono.defer(() -> {
            if (clientId == null) {
                throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                    "A non-null value for client ID must be provided for user authentication."));
            }
            String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("")
                + "/" + tenantId;
            PublicClientApplication.Builder publicClientApplicationBuilder =
                PublicClientApplication.builder(clientId);
            try {
                publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl)
                    .validateAuthority(options.getAuthorityValidation());
            } catch (MalformedURLException e) {
                throw LOGGER.logExceptionAsWarning(new IllegalStateException(e));
            }
            initializeHttpPipelineAdapter();
            if (httpPipelineAdapter != null) {
                publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
            } else {
                publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            }
            if (options.getExecutorService() != null) {
                publicClientApplicationBuilder.executorService(options.getExecutorService());
            }
            if (!options.isCp1Disabled()) {
                Set<String> set = new HashSet<>(1);
                set.add("CP1");
                publicClientApplicationBuilder.clientCapabilities(set);
            }
            return Mono.just(publicClientApplicationBuilder);
        }).flatMap(builder -> {
            TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions();
            PersistentTokenCacheImpl tokenCache = null;
            if (tokenCachePersistenceOptions != null) {
                try {
                    tokenCache = new PersistentTokenCacheImpl()
                        .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed())
                        .setName(tokenCachePersistenceOptions.getName());
                    builder.setTokenCacheAccessAspect(tokenCache);
                } catch (Throwable t) {
                    throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
                        "Shared token cache is unavailable in this environment.", null, t));
                }
            }
            PublicClientApplication publicClientApplication = builder.build();
            return tokenCache != null
                ? tokenCache.registerCache()
                    .map(ignored -> publicClientApplication)
                : Mono.just(publicClientApplication);
        });
    }

    /**
     * Asynchronously acquire a token using credentials cached by the Azure Tools for IntelliJ plugin,
     * supporting the service principal ("SP") and device code ("DC") auth methods stored by the plugin.
     */
    public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) {
        try {
            IntelliJCacheAccessor cacheAccessor =
                new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath());
            IntelliJAuthMethodDetails authDetails;
            try {
                authDetails = cacheAccessor.getAuthDetailsIfAvailable();
            } catch (CredentialUnavailableException e) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJ Authentication not available.", e)));
            }
            if (authDetails == null) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJ Authentication not available."
                        + " Please log in with Azure Tools for IntelliJ plugin in the IDE.")));
            }
            String authType = authDetails.getAuthMethod();
            if ("SP".equalsIgnoreCase(authType)) {
                // Service principal: rebuild a confidential client from the plugin's stored credentials.
                Map<String, String> spDetails = cacheAccessor
                    .getIntellijServicePrincipalDetails(authDetails.getCredFilePath());
                String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant");
                try {
                    ConfidentialClientApplication.Builder applicationBuilder =
                        ConfidentialClientApplication.builder(spDetails.get("client"),
                            ClientCredentialFactory.createFromSecret(spDetails.get("key")))
                            .authority(authorityUrl).validateAuthority(options.getAuthorityValidation());
                    if (httpPipelineAdapter != null) {
                        applicationBuilder.httpClient(httpPipelineAdapter);
                    } else if (options.getProxyOptions() != null) {
                        applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
                    }
                    if (options.getExecutorService() != null) {
                        applicationBuilder.executorService(options.getExecutorService());
                    }
                    ConfidentialClientApplication application = applicationBuilder.build();
                    return Mono.fromFuture(application.acquireToken(
                        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                            .build())).map(MsalToken::new);
                } catch (MalformedURLException e) {
                    return Mono.error(e);
                }
            } else if ("DC".equalsIgnoreCase(authType)) {
                // Device code: redeem the refresh token cached by the plugin.
                LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools"
                    + " for IntelliJ Plugin.");
                if (isADFSTenant()) {
                    LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and"
                        + " the ADFS tenants are not supported via IntelliJ Authentication currently.");
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException("IntelliJCredential "
                            + "authentication unavailable. ADFS tenant/authorities are not supported.")));
                }
                try {
                    JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials();
                    String refreshToken = intelliJCredentials.get("refreshToken").textValue();
                    RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder =
                        RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken);
                    if (request.getClaims() != null) {
                        ClaimsRequest customClaimRequest =
                            CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
                        refreshTokenParametersBuilder.claims(customClaimRequest);
                    }
                    return publicClientApplicationAccessor.getValue()
                        .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build()))
                            .map(MsalToken::new));
                } catch (CredentialUnavailableException e) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e));
                }
            } else {
                LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication"
                    + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one"
                    + " of those schemes from Azure Tools for IntelliJ plugin.");
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJ Authentication not available."
                        + " Please login with Azure Tools for IntelliJ plugin in the IDE.")));
            }
        } catch (IOException e) {
            return Mono.error(e);
        }
    }

    /**
     * Asynchronously acquire a token from Active Directory with Azure CLI.
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." 
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? 
LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". 
To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new 
CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. 
* * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * 
Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. 
" + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. 
Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc 
Managed Identity Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) 
.useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = 
new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } }
Replace the `Pattern` usage with `substring`: since we first check that the string begins with the exact value the regex `replaceFirst` would have removed, the replacement is effectively a substring operation.
/**
 * Reads the Device Code credential cached by the Azure Tools IntelliJ plugin for the current OS.
 *
 * @return the cached authentication details as a {@link JsonNode}.
 * @throws IOException if the cached credential cannot be read.
 */
public JsonNode getDeviceCodeCredentials() throws IOException {
    if (Platform.isMac()) {
        // macOS: the plugin stores the credential JSON in the system KeyChain.
        KeyChainAccessor keyChain = new KeyChainAccessor(null, "ADAuthManager", "cachedAuthResult");
        return DEFAULT_MAPPER.readTree(new String(keyChain.read(), StandardCharsets.UTF_8));
    }
    if (Platform.isLinux()) {
        // Linux: the credential lives in the desktop keyring under the IntelliJ credential store schema.
        LinuxKeyRingAccessor keyRing = new LinuxKeyRingAccessor(
            "com.intellij.credentialStore.Credential",
            "service", "ADAuthManager",
            "account", "cachedAuthResult");
        String rawCredential = new String(keyRing.read(), StandardCharsets.UTF_8);
        // The keyring entry may carry an attribute-name prefix; strip it before JSON parsing.
        String prefix = "cachedAuthResult@";
        String credentialJson = rawCredential.startsWith(prefix)
            ? rawCredential.substring(prefix.length())
            : rawCredential;
        return DEFAULT_MAPPER.readTree(credentialJson);
    }
    if (Platform.isWindows()) {
        // Windows: credentials are kept in the plugin's KeePass database.
        return getCredentialFromKdbx();
    }
    throw LOGGER.logExceptionAsError(
        new RuntimeException(String.format("OS %s Platform not supported.", Platform.getOSType())));
}
jsonCred = jsonCred.substring("cachedAuthResult@".length());
// Reads the Azure Tools plugin's cached device-code credential for the current OS: macOS KeyChain,
// Linux keyring (stripping an optional "cachedAuthResult@" prefix before JSON parsing), or the
// Windows KeePass database (getCredentialFromKdbx). Any other platform fails with a RuntimeException
// logged through LOGGER.
public JsonNode getDeviceCodeCredentials() throws IOException { if (Platform.isMac()) { KeyChainAccessor accessor = new KeyChainAccessor(null, "ADAuthManager", "cachedAuthResult"); String jsonCred = new String(accessor.read(), StandardCharsets.UTF_8); return DEFAULT_MAPPER.readTree(jsonCred); } else if (Platform.isLinux()) { LinuxKeyRingAccessor accessor = new LinuxKeyRingAccessor( "com.intellij.credentialStore.Credential", "service", "ADAuthManager", "account", "cachedAuthResult"); String jsonCred = new String(accessor.read(), StandardCharsets.UTF_8); if (jsonCred.startsWith("cachedAuthResult@")) { jsonCred = jsonCred.substring("cachedAuthResult@".length()); } return DEFAULT_MAPPER.readTree(jsonCred); } else if (Platform.isWindows()) { return getCredentialFromKdbx(); } else { throw LOGGER.logExceptionAsError(new RuntimeException(String.format("OS %s Platform not supported.", Platform.getOSType()))); } }
/**
 * Accesses the credential caches written by the Azure Tools for IntelliJ plugin.
 * On Windows the credentials live in a KeePass database whose master password is protected with
 * DPAPI and a plugin-specific AES key; service principal details are read from a plain
 * {@code key=value} credential file.
 */
class IntelliJCacheAccessor {
    private static final ClientLogger LOGGER = new ClientLogger(IntelliJCacheAccessor.class);
    private final String keePassDatabasePath;
    // AES key IntelliJ uses to protect the KeePass master password (ASCII "Proxy Config Sec").
    private static final byte[] CRYPTO_KEY = new byte[] {0x50, 0x72, 0x6f, 0x78, 0x79, 0x20, 0x43, 0x6f,
        0x6e, 0x66, 0x69, 0x67, 0x20, 0x53, 0x65, 0x63};
    private static final ObjectMapper DEFAULT_MAPPER = new ObjectMapper();
    private static final ObjectMapper DONT_FAIL_ON_UNKNOWN_PROPERTIES_MAPPER = new ObjectMapper()
        .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    /**
     * Creates an instance of {@link IntelliJCacheAccessor}
     *
     * @param keePassDatabasePath the KeePass database path.
     */
    public IntelliJCacheAccessor(String keePassDatabasePath) {
        this.keePassDatabasePath = keePassDatabasePath;
    }

    // The plugin has used both a plain and a dot-prefixed config directory name over time; probe both.
    private List<String> getAzureToolsForIntelliJPluginConfigPaths() {
        return Arrays.asList(Paths.get(System.getProperty("user.home"), "AzureToolsForIntelliJ").toString(),
            Paths.get(System.getProperty("user.home"), ".AzureToolsForIntelliJ").toString());
    }

    /**
     * Get the Service Principal credential details of Azure Tools plugin in the IntelliJ IDE.
     *
     * @param credFilePath the file path holding authentication details
     * @return the {@link HashMap} holding auth details.
     * @throws IOException if an error is countered while reading the credential file.
     */
    public Map<String, String> getIntellijServicePrincipalDetails(String credFilePath) throws IOException {
        HashMap<String, String> servicePrincipalDetails = new HashMap<>(8);
        // try-with-resources replaces the previous manual close in a finally block.
        try (BufferedReader reader = new BufferedReader(new FileReader(credFilePath))) {
            String line = reader.readLine();
            while (line != null) {
                // Split on the FIRST '=' only: values (e.g. base64-encoded secrets) may themselves
                // contain '=' and must not be truncated. Lines without '=' are skipped instead of
                // failing with ArrayIndexOutOfBoundsException.
                String[] split = line.split("=", 2);
                if (split.length == 2) {
                    servicePrincipalDetails.put(split[0], split[1].replace("\\", ""));
                }
                line = reader.readLine();
            }
        }
        return servicePrincipalDetails;
    }

    /**
     * Decrypts the plugin's KeePass master password and extracts the cached auth JSON from the
     * "ADAuthManager" database entry.
     */
    private JsonNode getCredentialFromKdbx() throws IOException {
        if (CoreUtils.isNullOrEmpty(keePassDatabasePath)) {
            throw new CredentialUnavailableException("The KeePass database path is either empty or not configured."
                + " Please configure it on the builder. It is required to use "
                + "IntelliJ credential on the windows platform.");
        }
        String extractedpwd = getKdbxPassword();
        SecretKeySpec key = new SecretKeySpec(CRYPTO_KEY, "AES");
        String password;
        // The stored value is base64(DPAPI(int ivLen | iv | AES-CBC ciphertext)).
        byte[] dataToDecrypt = Crypt32Util.cryptUnprotectData(Base64.getDecoder().decode(extractedpwd));
        ByteBuffer decryptBuffer = ByteBuffer.wrap(dataToDecrypt);
        Cipher cipher;
        try {
            cipher = Cipher.getInstance("AES/CBC/PKCS5Padding");
            int ivLen = decryptBuffer.getInt();
            cipher.init(Cipher.DECRYPT_MODE, key,
                new IvParameterSpec(dataToDecrypt, decryptBuffer.position(), ivLen));
            int dataOffset = decryptBuffer.position() + ivLen;
            byte[] decrypted = cipher.doFinal(dataToDecrypt, dataOffset, dataToDecrypt.length - dataOffset);
            password = new String(decrypted, StandardCharsets.UTF_8);
        } catch (NoSuchAlgorithmException | NoSuchPaddingException | InvalidKeyException
            | InvalidAlgorithmParameterException | IllegalBlockSizeException | BadPaddingException e) {
            throw LOGGER.logExceptionAsError(new RuntimeException("Unable to access cache.", e));
        }
        try (InputStream inputStream = new FileInputStream(keePassDatabasePath)) {
            IntelliJKdbxDatabase kdbxDatabase = IntelliJKdbxDatabase.parse(inputStream, password);
            String jsonToken = kdbxDatabase.getDatabaseEntryValue("ADAuthManager");
            if (CoreUtils.isNullOrEmpty(jsonToken)) {
                throw new CredentialUnavailableException("No credentials found in the cache."
                    + " Please login with IntelliJ Azure Tools plugin in the IDE.");
            }
            return DEFAULT_MAPPER.readTree(jsonToken);
        } catch (Exception e) {
            throw LOGGER.logExceptionAsError(new RuntimeException("Failed to read KeePass database.", e));
        }
    }

    /**
     * Reads the DPAPI-protected KeePass password token from the "c.pwd" file that sits next to the
     * database. The token is the third space-separated field of the line containing "value".
     */
    private String getKdbxPassword() throws IOException {
        String passwordFilePath = new File(keePassDatabasePath).getParent() + File.separator + "c.pwd";
        String extractedpwd = "";
        try (BufferedReader reader = new BufferedReader(new FileReader(passwordFilePath))) {
            String line = reader.readLine();
            while (line != null) {
                if (line.contains("value")) {
                    String[] tokens = line.split(" ");
                    if (tokens.length == 3) {
                        extractedpwd = tokens[2];
                        break;
                    } else {
                        throw LOGGER.logExceptionAsError(new RuntimeException("Password not found in the file."));
                    }
                }
                line = reader.readLine();
            }
        }
        return extractedpwd;
    }

    /**
     * Get the auth host of the specified {@code azureEnvironment}.
     * @param azureEnvironment the specified Azure Environment
     * @return the auth host.
     */
    public String getAzureAuthHost(String azureEnvironment) {
        switch (azureEnvironment) {
            case "GLOBAL":
                return AzureAuthorityHosts.AZURE_PUBLIC_CLOUD;
            case "CHINA":
                return AzureAuthorityHosts.AZURE_CHINA;
            case "GERMAN":
                return AzureAuthorityHosts.AZURE_GERMANY;
            case "US_GOVERNMENT":
                return AzureAuthorityHosts.AZURE_GOVERNMENT;
            default:
                // Unknown environment names fall back to the public cloud authority.
                return AzureAuthorityHosts.AZURE_PUBLIC_CLOUD;
        }
    }

    /**
     * Parse the auth details of the specified file.
     * @param file the file input;
     * @return the parsed {@link IntelliJAuthMethodDetails} from the file input.
     * @throws IOException when invalid file path is specified.
     */
    public IntelliJAuthMethodDetails parseAuthMethodDetails(File file) throws IOException {
        // Lenient mapper: newer plugin versions may add fields this model does not know about.
        return DONT_FAIL_ON_UNKNOWN_PROPERTIES_MAPPER.readValue(file, IntelliJAuthMethodDetails.class);
    }

    /**
     * Get the current authentication method details of Azure Tools plugin in IntelliJ IDE.
     *
     * @return the {@link IntelliJAuthMethodDetails}, or {@code null} when no usable details exist.
     * @throws IOException if an error is encountered while reading the auth details file.
     */
    public IntelliJAuthMethodDetails getAuthDetailsIfAvailable() throws IOException {
        File authFile = null;
        for (String metadataPath : getAzureToolsForIntelliJPluginConfigPaths()) {
            String authMethodDetailsPath = Paths.get(metadataPath, "AuthMethodDetails.json").toString();
            authFile = new File(authMethodDetailsPath);
            if (authFile.exists()) {
                break;
            }
        }
        if (authFile == null || !authFile.exists()) {
            return null;
        }
        IntelliJAuthMethodDetails authMethodDetails = parseAuthMethodDetails(authFile);
        String authType = authMethodDetails.getAuthMethod();
        if (CoreUtils.isNullOrEmpty(authType)) {
            return null;
        }
        // "SP" (service principal) requires a credential file; "DC" (device code) an account email.
        if ("SP".equalsIgnoreCase(authType)) {
            if (CoreUtils.isNullOrEmpty(authMethodDetails.getCredFilePath())) {
                return null;
            }
        } else if ("DC".equalsIgnoreCase(authType)) {
            if (CoreUtils.isNullOrEmpty(authMethodDetails.getAccountEmail())) {
                return null;
            }
        }
        return authMethodDetails;
    }
}
// Accessor for the Azure Tools for IntelliJ credential caches: service-principal details come from a
// key=value credential file; on Windows the cached auth JSON is pulled from a KeePass database whose
// master password is DPAPI-protected and AES-CBC encrypted with CRYPTO_KEY ("Proxy Config Sec").
// NOTE(review): getIntellijServicePrincipalDetails splits each line on EVERY '=' and keeps only
// split[1], so values containing '=' (e.g. base64 padding) get truncated, and a line without '='
// throws ArrayIndexOutOfBoundsException — consider line.split("=", 2) with a length guard.
// NOTE(review): FileReader uses the platform default charset — TODO confirm the credential file
// encoding before switching to an explicit charset.
class IntelliJCacheAccessor { private static final ClientLogger LOGGER = new ClientLogger(IntelliJCacheAccessor.class); private final String keePassDatabasePath; private static final byte[] CRYPTO_KEY = new byte[] {0x50, 0x72, 0x6f, 0x78, 0x79, 0x20, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x53, 0x65, 0x63}; private static final ObjectMapper DEFAULT_MAPPER = new ObjectMapper(); private static final ObjectMapper DONT_FAIL_ON_UNKNOWN_PROPERTIES_MAPPER = new ObjectMapper() .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); /** * Creates an instance of {@link IntelliJCacheAccessor} * * @param keePassDatabasePath the KeePass database path. */ public IntelliJCacheAccessor(String keePassDatabasePath) { this.keePassDatabasePath = keePassDatabasePath; } private List<String> getAzureToolsForIntelliJPluginConfigPaths() { return Arrays.asList(Paths.get(System.getProperty("user.home"), "AzureToolsForIntelliJ").toString(), Paths.get(System.getProperty("user.home"), ".AzureToolsForIntelliJ").toString()); } /** * Get the Device Code credential details of Azure Tools plugin in the IntelliJ IDE. * * @return the {@link JsonNode} holding the authentication details. * @throws IOException If an I/O error occurs. */ /** * Get the Service Principal credential details of Azure Tools plugin in the IntelliJ IDE. * * @param credFilePath the file path holding authentication details * @return the {@link HashMap} holding auth details. * @throws IOException if an error is countered while reading the credential file. 
*/ public Map<String, String> getIntellijServicePrincipalDetails(String credFilePath) throws IOException { BufferedReader reader = null; HashMap<String, String> servicePrincipalDetails = new HashMap<>(8); try { reader = new BufferedReader(new FileReader(credFilePath)); String line = reader.readLine(); while (line != null) { String[] split = line.split("="); split[1] = split[1].replace("\\", ""); servicePrincipalDetails.put(split[0], split[1]); line = reader.readLine(); } } finally { if (reader != null) { reader.close(); } } return servicePrincipalDetails; } @SuppressWarnings({"rawtypes", "unchecked"}) private JsonNode getCredentialFromKdbx() throws IOException { if (CoreUtils.isNullOrEmpty(keePassDatabasePath)) { throw new CredentialUnavailableException("The KeePass database path is either empty or not configured." + " Please configure it on the builder. It is required to use " + "IntelliJ credential on the windows platform."); } String extractedpwd = getKdbxPassword(); SecretKeySpec key = new SecretKeySpec(CRYPTO_KEY, "AES"); String password; byte[] dataToDecrypt = Crypt32Util.cryptUnprotectData(Base64.getDecoder().decode(extractedpwd)); ByteBuffer decryptBuffer = ByteBuffer.wrap(dataToDecrypt); Cipher cipher; try { cipher = Cipher.getInstance("AES/CBC/PKCS5Padding"); int ivLen = decryptBuffer.getInt(); cipher.init(Cipher.DECRYPT_MODE, key, new IvParameterSpec(dataToDecrypt, decryptBuffer.position(), ivLen)); int dataOffset = decryptBuffer.position() + ivLen; byte[] decrypted = cipher.doFinal(dataToDecrypt, dataOffset, dataToDecrypt.length - dataOffset); password = new String(decrypted, StandardCharsets.UTF_8); } catch (NoSuchAlgorithmException | NoSuchPaddingException | InvalidKeyException | InvalidAlgorithmParameterException | IllegalBlockSizeException | BadPaddingException e) { throw LOGGER.logExceptionAsError(new RuntimeException("Unable to access cache.", e)); } try (InputStream inputStream = new FileInputStream(keePassDatabasePath)) { IntelliJKdbxDatabase 
kdbxDatabase = IntelliJKdbxDatabase.parse(inputStream, password); String jsonToken = kdbxDatabase.getDatabaseEntryValue("ADAuthManager"); if (CoreUtils.isNullOrEmpty(jsonToken)) { throw new CredentialUnavailableException("No credentials found in the cache." + " Please login with IntelliJ Azure Tools plugin in the IDE."); } return DEFAULT_MAPPER.readTree(jsonToken); } catch (Exception e) { throw LOGGER.logExceptionAsError(new RuntimeException("Failed to read KeePass database.", e)); } } private String getKdbxPassword() throws IOException { String passwordFilePath = new File(keePassDatabasePath).getParent() + File.separator + "c.pwd"; String extractedpwd = ""; try (BufferedReader reader = new BufferedReader(new FileReader(passwordFilePath))) { String line = reader.readLine(); while (line != null) { if (line.contains("value")) { String[] tokens = line.split(" "); if (tokens.length == 3) { extractedpwd = tokens[2]; break; } else { throw LOGGER.logExceptionAsError(new RuntimeException("Password not found in the file.")); } } line = reader.readLine(); } } return extractedpwd; } /** * Get the auth host of the specified {@code azureEnvironment}. * @param azureEnvironment the specified Azure Environment * @return the auth host. */ public String getAzureAuthHost(String azureEnvironment) { switch (azureEnvironment) { case "GLOBAL": return AzureAuthorityHosts.AZURE_PUBLIC_CLOUD; case "CHINA": return AzureAuthorityHosts.AZURE_CHINA; case "GERMAN": return AzureAuthorityHosts.AZURE_GERMANY; case "US_GOVERNMENT": return AzureAuthorityHosts.AZURE_GOVERNMENT; default: return AzureAuthorityHosts.AZURE_PUBLIC_CLOUD; } } /** * Parse the auth details of the specified file. * @param file the file input; * @return the parsed {@link IntelliJAuthMethodDetails} from the file input. * @throws IOException when invalid file path is specified. 
*/ public IntelliJAuthMethodDetails parseAuthMethodDetails(File file) throws IOException { return DONT_FAIL_ON_UNKNOWN_PROPERTIES_MAPPER.readValue(file, IntelliJAuthMethodDetails.class); } /** * Get the current authentication method details of Azure Tools plugin in IntelliJ IDE. * * @return the {@link IntelliJAuthMethodDetails} * @throws IOException if an error is encountered while reading the auth details file. */ public IntelliJAuthMethodDetails getAuthDetailsIfAvailable() throws IOException { File authFile = null; for (String metadataPath : getAzureToolsForIntelliJPluginConfigPaths()) { String authMethodDetailsPath = Paths.get(metadataPath, "AuthMethodDetails.json").toString(); authFile = new File(authMethodDetailsPath); if (authFile.exists()) { break; } } if (authFile == null || !authFile.exists()) { return null; } IntelliJAuthMethodDetails authMethodDetails = parseAuthMethodDetails(authFile); String authType = authMethodDetails.getAuthMethod(); if (CoreUtils.isNullOrEmpty(authType)) { return null; } if ("SP".equalsIgnoreCase(authType)) { if (CoreUtils.isNullOrEmpty(authMethodDetails.getCredFilePath())) { return null; } } else if ("DC".equalsIgnoreCase(authType)) { if (CoreUtils.isNullOrEmpty(authMethodDetails.getAccountEmail())) { return null; } } return authMethodDetails; } }
Unnecessary array creation to pass into a varargs API
/**
 * Starts a persistent PowerShell process whose stdin subsequent commands are piped into.
 * On Windows the console code page is first switched to 65001 (UTF-8) so PowerShell output
 * decodes correctly. Work runs on the configured ExecutorService when present, otherwise on
 * the common ForkJoinPool.
 *
 * @return a {@link Mono} emitting this manager once the session is established.
 */
public Mono<PowershellManager> initSession() {
    ProcessBuilder pb;
    if (Platform.isWindows()) {
        pb = new ProcessBuilder("cmd.exe", "/c", "chcp", "65001", ">", "NUL", "&", powershellPath,
            "-ExecutionPolicy", "Bypass", "-NoExit", "-NoProfile", "-Command", "-");
    } else {
        pb = new ProcessBuilder(powershellPath, "-nologo", "-noexit", "-Command", "-");
    }
    pb.redirectErrorStream(true);
    Supplier<PowershellManager> supplier = () -> {
        try {
            this.process = pb.start();
            this.commandWriter = new PrintWriter(
                new OutputStreamWriter(new BufferedOutputStream(process.getOutputStream()),
                    StandardCharsets.UTF_8), true);
            // If the process terminates within the 4-second grace period, PowerShell failed to start.
            if (this.process.waitFor(4L, TimeUnit.SECONDS) && !this.process.isAlive()) {
                throw new CredentialUnavailableException("Unable to execute PowerShell."
                    + " Please make sure that it is installed in your system.");
            }
            this.closed = false;
        } catch (InterruptedException e) {
            // Restore the interrupt status before surfacing the failure, per standard
            // InterruptedException handling guidance.
            Thread.currentThread().interrupt();
            throw new CredentialUnavailableException("Unable to execute PowerShell. "
                + "Please make sure that it is installed in your system", e);
        } catch (IOException e) {
            throw new CredentialUnavailableException("Unable to execute PowerShell. "
                + "Please make sure that it is installed in your system", e);
        }
        return this;
    };
    return executorService != null
        ? Mono.fromFuture(CompletableFuture.supplyAsync(supplier, executorService))
        : Mono.fromFuture(CompletableFuture.supplyAsync(supplier));
}
pb = new ProcessBuilder(powershellPath, "-nologo", "-noexit", "-Command", "-");
// Starts a persistent PowerShell child process and wires a UTF-8 PrintWriter to its stdin; on
// Windows the console code page is switched to 65001 (UTF-8) via "chcp" before launch. If the
// process exits within the 4-second waitFor window, PowerShell is treated as not installed and a
// CredentialUnavailableException is thrown. Runs on executorService when set, else the common pool.
// NOTE(review): the InterruptedException catch does not restore the thread's interrupt status
// (Thread.currentThread().interrupt()) before throwing — consider re-interrupting.
public Mono<PowershellManager> initSession() { ProcessBuilder pb; if (Platform.isWindows()) { pb = new ProcessBuilder("cmd.exe", "/c", "chcp", "65001", ">", "NUL", "&", powershellPath, "-ExecutionPolicy", "Bypass", "-NoExit", "-NoProfile", "-Command", "-"); } else { pb = new ProcessBuilder(powershellPath, "-nologo", "-noexit", "-Command", "-"); } pb.redirectErrorStream(true); Supplier<PowershellManager> supplier = () -> { try { this.process = pb.start(); this.commandWriter = new PrintWriter( new OutputStreamWriter(new BufferedOutputStream(process.getOutputStream()), StandardCharsets.UTF_8), true); if (this.process.waitFor(4L, TimeUnit.SECONDS) && !this.process.isAlive()) { throw new CredentialUnavailableException("Unable to execute PowerShell." + " Please make sure that it is installed in your system."); } this.closed = false; } catch (InterruptedException | IOException e) { throw new CredentialUnavailableException("Unable to execute PowerShell. " + "Please make sure that it is installed in your system", e); } return this; }; return executorService != null ? Mono.fromFuture(CompletableFuture.supplyAsync(supplier, executorService)) : Mono.fromFuture(CompletableFuture.supplyAsync(supplier)); }
// Manages a long-lived PowerShell child process. runCommand() writes one command to the process's
// stdin and accumulates stdout lines (readData + repeatWhenEmpty drive the read loop) until the
// reader goes quiet, then strips trailing whitespace via PS_RESPONSE_PATTERN. canRead() polls
// reader.ready() with an exponentially growing pause starting at 62ms, capped at 500ms (or
// waitPause=1000ms on macOS), returning false once the cap is exceeded. close() sends "exit",
// waits up to maxWait=10s, and closes the writer/stream. Work is scheduled on the supplied
// ExecutorService when present, otherwise on the common ForkJoinPool via CompletableFuture.
// NOTE(review): canRead() wraps InterruptedException without restoring the interrupt flag, and the
// readData()/repeatWhenEmpty interplay is order-sensitive — code left byte-identical; verify
// behavior carefully before restructuring.
class PowershellManager { private static final ClientLogger LOGGER = new ClientLogger(PowershellManager.class); public static final Pattern PS_RESPONSE_PATTERN = Pattern.compile("\\s+$"); private Process process; private PrintWriter commandWriter; private boolean closed; private int waitPause = 1000; private long maxWait = 10000L; private final String powershellPath; private ExecutorService executorService; public PowershellManager(String powershellPath) { this.powershellPath = powershellPath; } public PowershellManager(String powershellPath, ExecutorService executorService) { this.powershellPath = powershellPath; this.executorService = executorService; } public Mono<String> runCommand(String command) { BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8)); StringBuilder powerShellOutput = new StringBuilder(); commandWriter.println(command); return canRead(reader) .flatMap(b -> { if (b) { return readData(reader, powerShellOutput) .flatMap(ignored -> Mono.just(PS_RESPONSE_PATTERN.matcher(powerShellOutput.toString()) .replaceAll(""))); } else { return Mono.error(new CredentialUnavailableException("Error reading data from reader")); } }); } private Mono<Boolean> readData(BufferedReader reader, StringBuilder powerShellOutput) { return Mono.defer(() -> { String line; try { line = reader.readLine(); if (line != null) { powerShellOutput.append(line).append("\r\n"); return canRead(reader).flatMap(b -> { if (!this.closed && b) { return Mono.empty(); } return Mono.just(true); }); } else { return Mono.just(true); } } catch (IOException e) { return Mono.error( new CredentialUnavailableException("Powershell reader not ready for reading", e)); } }).repeatWhenEmpty((Flux<Long> longFlux) -> longFlux.concatMap(ignored -> Flux.just(true))); } private Mono<Boolean> canRead(BufferedReader reader) { Supplier<Boolean> supplier = () -> { int pause = 62; int maxPause = Platform.isMac() ? 
this.waitPause : 500; while (true) { try { if (!reader.ready()) { if (pause > maxPause) { return false; } pause *= 2; Thread.sleep((long) pause); } else { break; } } catch (IOException | InterruptedException e) { throw new CredentialUnavailableException("Powershell reader not ready for reading", e); } } return true; }; return executorService != null ? Mono.fromFuture(CompletableFuture.supplyAsync(supplier, executorService)) : Mono.fromFuture(CompletableFuture.supplyAsync(supplier)); } public Mono<Boolean> close() { if (!this.closed && this.process != null) { Supplier<Boolean> supplier = () -> { this.commandWriter.println("exit"); try { this.process.waitFor(maxWait, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { LOGGER.logExceptionAsError(new RuntimeException("PowerShell process encountered unexpected" + " error when closing.", e)); } finally { this.commandWriter.close(); try { if (process.isAlive()) { process.getInputStream().close(); } } catch (IOException ex) { LOGGER.logExceptionAsError(new RuntimeException("PowerShell stream encountered unexpected" + " error when closing.", ex)); } this.closed = true; } return this.closed; }; return executorService != null ? Mono.fromFuture(CompletableFuture.supplyAsync(supplier, executorService)) : Mono.fromFuture(CompletableFuture.supplyAsync(supplier)); } else { return Mono.just(true); } } }
// Duplicate occurrence of PowershellManager in this dataset row (context field). Same contract:
// drives a long-lived PowerShell process — runCommand() pipes a command to stdin and drains stdout
// until the reader goes quiet; canRead() backs off exponentially (62ms doubling, capped at 500ms /
// 1000ms on macOS); close() sends "exit" and tears down writer and stream. Left byte-identical.
class PowershellManager { private static final ClientLogger LOGGER = new ClientLogger(PowershellManager.class); public static final Pattern PS_RESPONSE_PATTERN = Pattern.compile("\\s+$"); private Process process; private PrintWriter commandWriter; private boolean closed; private int waitPause = 1000; private long maxWait = 10000L; private final String powershellPath; private ExecutorService executorService; public PowershellManager(String powershellPath) { this.powershellPath = powershellPath; } public PowershellManager(String powershellPath, ExecutorService executorService) { this.powershellPath = powershellPath; this.executorService = executorService; } public Mono<String> runCommand(String command) { BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8)); StringBuilder powerShellOutput = new StringBuilder(); commandWriter.println(command); return canRead(reader) .flatMap(b -> { if (b) { return readData(reader, powerShellOutput) .flatMap(ignored -> Mono.just(PS_RESPONSE_PATTERN.matcher(powerShellOutput.toString()) .replaceAll(""))); } else { return Mono.error(new CredentialUnavailableException("Error reading data from reader")); } }); } private Mono<Boolean> readData(BufferedReader reader, StringBuilder powerShellOutput) { return Mono.defer(() -> { String line; try { line = reader.readLine(); if (line != null) { powerShellOutput.append(line).append("\r\n"); return canRead(reader).flatMap(b -> { if (!this.closed && b) { return Mono.empty(); } return Mono.just(true); }); } else { return Mono.just(true); } } catch (IOException e) { return Mono.error( new CredentialUnavailableException("Powershell reader not ready for reading", e)); } }).repeatWhenEmpty((Flux<Long> longFlux) -> longFlux.concatMap(ignored -> Flux.just(true))); } private Mono<Boolean> canRead(BufferedReader reader) { Supplier<Boolean> supplier = () -> { int pause = 62; int maxPause = Platform.isMac() ? 
this.waitPause : 500; while (true) { try { if (!reader.ready()) { if (pause > maxPause) { return false; } pause *= 2; Thread.sleep((long) pause); } else { break; } } catch (IOException | InterruptedException e) { throw new CredentialUnavailableException("Powershell reader not ready for reading", e); } } return true; }; return executorService != null ? Mono.fromFuture(CompletableFuture.supplyAsync(supplier, executorService)) : Mono.fromFuture(CompletableFuture.supplyAsync(supplier)); } public Mono<Boolean> close() { if (!this.closed && this.process != null) { Supplier<Boolean> supplier = () -> { this.commandWriter.println("exit"); try { this.process.waitFor(maxWait, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { LOGGER.logExceptionAsError(new RuntimeException("PowerShell process encountered unexpected" + " error when closing.", e)); } finally { this.commandWriter.close(); try { if (process.isAlive()) { process.getInputStream().close(); } } catch (IOException ex) { LOGGER.logExceptionAsError(new RuntimeException("PowerShell stream encountered unexpected" + " error when closing.", ex)); } this.closed = true; } return this.closed; }; return executorService != null ? Mono.fromFuture(CompletableFuture.supplyAsync(supplier, executorService)) : Mono.fromFuture(CompletableFuture.supplyAsync(supplier)); } else { return Mono.just(true); } } }
Use StringBuilder instead of StringBuffer: StringBuffer synchronizes every operation, and this value is only ever used from a single thread, so the locking is unnecessary overhead.
/**
 * Computes the SHA-1 thumbprint of a certificate, rendered as a lower-case hex string with each
 * byte zero-padded to two characters.
 *
 * @param certificate the certificate to fingerprint (its DER encoding is digested).
 * @param logger the {@link ClientLogger} used to log and wrap failures.
 * @return the hex-encoded SHA-1 digest of the certificate's encoding.
 */
private static String extractCertificateThumbprint(Certificate certificate, ClientLogger logger) {
    try {
        MessageDigest messageDigest = MessageDigest.getInstance("SHA-1");
        byte[] encodedCertificate;
        try {
            encodedCertificate = certificate.getEncoded();
        } catch (CertificateEncodingException e) {
            // Route through the ClientLogger for consistency with the algorithm-failure path below
            // (previously this threw a bare, unlogged RuntimeException).
            throw logger.logExceptionAsError(new RuntimeException(e));
        }
        byte[] digest = messageDigest.digest(encodedCertificate);
        StringBuilder thumbprint = new StringBuilder(digest.length * 2);
        for (byte b : digest) {
            // "%02x" yields the same lower-case, zero-padded output as the previous manual
            // toHexString + "0"-prefix logic.
            thumbprint.append(String.format("%02x", b & 0xff));
        }
        return thumbprint.toString();
    } catch (NoSuchAlgorithmException e) {
        throw logger.logExceptionAsError(new RuntimeException(e));
    }
}
StringBuilder thumbprint = new StringBuilder();
// Computes the SHA-1 digest of the certificate's DER encoding and renders it as lower-case hex,
// zero-padding single-digit bytes (the unsignedByte < 16 branch).
// NOTE(review): the CertificateEncodingException path throws a bare RuntimeException instead of
// routing through logger.logExceptionAsError like the NoSuchAlgorithmException path — inconsistent
// error handling worth aligning.
private static String extractCertificateThumbprint(Certificate certificate, ClientLogger logger) { try { StringBuilder thumbprint = new StringBuilder(); MessageDigest messageDigest; messageDigest = MessageDigest.getInstance("SHA-1"); byte[] encodedCertificate; try { encodedCertificate = certificate.getEncoded(); } catch (CertificateEncodingException e) { throw new RuntimeException(e); } byte[] updatedDigest = messageDigest.digest(encodedCertificate); for (byte b : updatedDigest) { int unsignedByte = b & 0xff; if (unsignedByte < 16) { thumbprint.append("0"); } thumbprint.append(Integer.toHexString(unsignedByte)); } return thumbprint.toString(); } catch (NoSuchAlgorithmException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } }
// Pins an HttpsURLConnection to a single expected server certificate: standard hostname
// verification is intentionally disabled (ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER accepts everything)
// and replaced by a custom X509TrustManager that accepts the server chain only if some
// certificate's SHA-1 thumbprint equals certificateThumbprint (case-insensitive). Client
// certificates are rejected outright (checkClientTrusted always throws).
// NOTE(review): thumbprint pinning with hostname verification disabled is presumably intended for
// trusted local/IMDS endpoints only — confirm callers before reusing elsewhere.
class IdentitySslUtil { public static final HostnameVerifier ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER; static { ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER = new HostnameVerifier() { @SuppressWarnings("BadHostnameVerifier") @Override public boolean verify(String hostname, SSLSession session) { return true; } }; } private IdentitySslUtil() { } /** * * Pins the specified HTTPS URL Connection to work against a specific server-side certificate with * the specified thumbprint only. * * @param httpsUrlConnection The https url connection to configure * @param certificateThumbprint The thumbprint of the certificate * @param logger The {@link ClientLogger} used to log any errors that occur in this method call. */ public static void addTrustedCertificateThumbprint(HttpsURLConnection httpsUrlConnection, String certificateThumbprint, ClientLogger logger) { if (httpsUrlConnection.getHostnameVerifier() != ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER) { httpsUrlConnection.setHostnameVerifier(ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER); } TrustManager[] certificateTrust = new TrustManager[]{new X509TrustManager() { public X509Certificate[] getAcceptedIssuers() { return new X509Certificate[]{}; } public void checkClientTrusted(X509Certificate[] certificates, String authenticationType) throws CertificateException { throw logger.logExceptionAsError(new RuntimeException("No client side certificate configured.")); } public void checkServerTrusted(X509Certificate[] certificates, String authenticationType) throws CertificateException { if (certificates == null || certificates.length == 0) { throw logger.logExceptionAsError( new RuntimeException("Did not receive any certificate from the server.")); } for (X509Certificate x509Certificate : certificates) { String sslCertificateThumbprint = extractCertificateThumbprint(x509Certificate, logger); if (certificateThumbprint.equalsIgnoreCase(sslCertificateThumbprint)) { return; } } throw logger.logExceptionAsError(new RuntimeException( "Thumbprint of certificates received did 
not match the expected thumbprint.")); } } }; SSLSocketFactory sslSocketFactory; try { SSLContext sslContext = SSLContext.getInstance("TLS"); sslContext.init(null, certificateTrust, null); sslSocketFactory = sslContext.getSocketFactory(); } catch (NoSuchAlgorithmException | KeyManagementException e) { throw logger.logExceptionAsError(new RuntimeException("Error Creating SSL Context", e)); } if (httpsUrlConnection.getSSLSocketFactory() != sslSocketFactory) { httpsUrlConnection.setSSLSocketFactory(sslSocketFactory); } } }
// Duplicate occurrence of IdentitySslUtil in this dataset row (context field). Same contract:
// disables hostname verification and installs a trust manager that only accepts a server chain
// containing a certificate whose SHA-1 thumbprint matches certificateThumbprint. Left byte-identical.
class IdentitySslUtil { public static final HostnameVerifier ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER; static { ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER = new HostnameVerifier() { @SuppressWarnings("BadHostnameVerifier") @Override public boolean verify(String hostname, SSLSession session) { return true; } }; } private IdentitySslUtil() { } /** * * Pins the specified HTTPS URL Connection to work against a specific server-side certificate with * the specified thumbprint only. * * @param httpsUrlConnection The https url connection to configure * @param certificateThumbprint The thumbprint of the certificate * @param logger The {@link ClientLogger} used to log any errors that occur in this method call. */ public static void addTrustedCertificateThumbprint(HttpsURLConnection httpsUrlConnection, String certificateThumbprint, ClientLogger logger) { if (httpsUrlConnection.getHostnameVerifier() != ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER) { httpsUrlConnection.setHostnameVerifier(ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER); } TrustManager[] certificateTrust = new TrustManager[]{new X509TrustManager() { public X509Certificate[] getAcceptedIssuers() { return new X509Certificate[]{}; } public void checkClientTrusted(X509Certificate[] certificates, String authenticationType) throws CertificateException { throw logger.logExceptionAsError(new RuntimeException("No client side certificate configured.")); } public void checkServerTrusted(X509Certificate[] certificates, String authenticationType) throws CertificateException { if (certificates == null || certificates.length == 0) { throw logger.logExceptionAsError( new RuntimeException("Did not receive any certificate from the server.")); } for (X509Certificate x509Certificate : certificates) { String sslCertificateThumbprint = extractCertificateThumbprint(x509Certificate, logger); if (certificateThumbprint.equalsIgnoreCase(sslCertificateThumbprint)) { return; } } throw logger.logExceptionAsError(new RuntimeException( "Thumbprint of certificates received did 
not match the expected thumbprint.")); } } }; SSLSocketFactory sslSocketFactory; try { SSLContext sslContext = SSLContext.getInstance("TLS"); sslContext.init(null, certificateTrust, null); sslSocketFactory = sslContext.getSocketFactory(); } catch (NoSuchAlgorithmException | KeyManagementException e) { throw logger.logExceptionAsError(new RuntimeException("Error Creating SSL Context", e)); } if (httpsUrlConnection.getSSLSocketFactory() != sslSocketFactory) { httpsUrlConnection.setSSLSocketFactory(sslSocketFactory); } } }
👍
/**
 * Specifies the resource ID of a user assigned or system assigned managed identity, when this
 * credential is running in an environment with managed identities.
 *
 * <p>Mutually exclusive with the managed identity client ID; setting both causes
 * {@code build()} to fail.</p>
 *
 * @param resourceId the resource ID of the managed identity.
 * @return An updated instance of this builder with the managed identity resource id set as specified.
 */
public AzureApplicationCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; }
this.managedIdentityResourceId = resourceId;
// Builder setter: records the managed identity resource ID and returns this builder for chaining.
// Mutually exclusive with managedIdentityClientId — build() throws IllegalStateException when both
// are set.
public AzureApplicationCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; }
/**
 * Fluent builder for {@link AzureApplicationCredential}, a chained credential of
 * {@link EnvironmentCredential} followed by {@link ManagedIdentityCredential}.
 */
class AzureApplicationCredentialBuilder extends CredentialBuilderBase<AzureApplicationCredentialBuilder> {
    private static final ClientLogger LOGGER = new ClientLogger(AzureApplicationCredentialBuilder.class);

    // At most one of these two may be set; build() enforces the mutual exclusion.
    private String managedIdentityClientId;
    private String managedIdentityResourceId;

    /**
     * Creates an instance of a AzureApplicationCredentialBuilder.
     */
    AzureApplicationCredentialBuilder() {
        Configuration configuration = Configuration.getGlobalConfiguration().clone();
        // Default the managed identity client id from the environment, if present.
        managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID);
    }

    /**
     * Specifies the Azure Active Directory endpoint to acquire tokens.
     * @param authorityHost the Azure Active Directory endpoint
     * @return An updated instance of this builder with the authority host set as specified.
     */
    public AzureApplicationCredentialBuilder authorityHost(String authorityHost) {
        this.identityClientOptions.setAuthorityHost(authorityHost);
        return this;
    }

    /**
     * Specifies the client ID of user assigned or system assigned identity, when this credential is running
     * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable
     * will be used. If neither is set, the default value is null and will only work with system assigned
     * managed identities and not user assigned managed identities.
     *
     * @param clientId the client ID
     * @return An updated instance of this builder with the managed identity client id set as specified.
     */
    public AzureApplicationCredentialBuilder managedIdentityClientId(String clientId) {
        this.managedIdentityClientId = clientId;
        return this;
    }

    /**
     * Specifies the resource ID of user assigned or system assigned identity, when this credential is running
     * in an environment with managed identities.
     *
     * @param resourceId the resource ID
     * @return An updated instance of this builder with the managed identity resource id set as specified.
     */
    public AzureApplicationCredentialBuilder managedIdentityResourceId(String resourceId) {
        this.managedIdentityResourceId = resourceId;
        return this;
    }

    /**
     * Specifies the ExecutorService to be used to execute the authentication requests.
     * Developer is responsible for maintaining the lifecycle of the ExecutorService.
     *
     * <p>
     * If this is not configured, the {@link ForkJoinPool#commonPool() common fork join pool} is used, which is
     * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication
     * requests might starve and setting up this executor service should be considered.
     * </p>
     *
     * <p> The executor service can be safely shut down if the TokenCredential is no longer being used by the
     * Azure SDK clients and should be shut down before the application exits. </p>
     *
     * @param executorService the executor service to use for executing authentication requests.
     * @return An updated instance of this builder with the executor service set as specified.
     */
    public AzureApplicationCredentialBuilder executorService(ExecutorService executorService) {
        this.identityClientOptions.setExecutorService(executorService);
        return this;
    }

    /**
     * Creates new {@link AzureApplicationCredential} with the configured options set.
     * @return a {@link AzureApplicationCredential} with the current configurations.
     * @throws IllegalStateException if clientId and resourceId are both set.
     */
    public AzureApplicationCredential build() {
        if (managedIdentityClientId != null && managedIdentityResourceId != null) {
            throw LOGGER.logExceptionAsError(
                new IllegalStateException("Only one of managedIdentityClientId and managedIdentityResourceId can be specified."));
        }
        return new AzureApplicationCredential(getCredentialsChain());
    }

    // Builds the fixed credential chain: environment credential first, then managed identity.
    private ArrayList<TokenCredential> getCredentialsChain() {
        ArrayList<TokenCredential> output = new ArrayList<>(2);
        output.add(new EnvironmentCredential(identityClientOptions));
        output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId,
            identityClientOptions));
        return output;
    }
}
/**
 * Fluent builder for {@link AzureApplicationCredential}, a chained credential of
 * {@link EnvironmentCredential} followed by {@link ManagedIdentityCredential}.
 */
class AzureApplicationCredentialBuilder extends CredentialBuilderBase<AzureApplicationCredentialBuilder> {
    private static final ClientLogger LOGGER = new ClientLogger(AzureApplicationCredentialBuilder.class);

    // At most one of these two may be set; build() enforces the mutual exclusion.
    private String managedIdentityClientId;
    private String managedIdentityResourceId;

    /**
     * Creates an instance of a AzureApplicationCredentialBuilder.
     */
    AzureApplicationCredentialBuilder() {
        Configuration configuration = Configuration.getGlobalConfiguration().clone();
        // Default the managed identity client id from the environment, if present.
        managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID);
    }

    /**
     * Specifies the Azure Active Directory endpoint to acquire tokens.
     * @param authorityHost the Azure Active Directory endpoint
     * @return An updated instance of this builder with the authority host set as specified.
     */
    public AzureApplicationCredentialBuilder authorityHost(String authorityHost) {
        this.identityClientOptions.setAuthorityHost(authorityHost);
        return this;
    }

    /**
     * Specifies the client ID of user assigned or system assigned identity, when this credential is running
     * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable
     * will be used. If neither is set, the default value is null and will only work with system assigned
     * managed identities and not user assigned managed identities.
     *
     * @param clientId the client ID
     * @return An updated instance of this builder with the managed identity client id set as specified.
     */
    public AzureApplicationCredentialBuilder managedIdentityClientId(String clientId) {
        this.managedIdentityClientId = clientId;
        return this;
    }

    /**
     * Specifies the resource ID of user assigned or system assigned identity, when this credential is running
     * in an environment with managed identities.
     *
     * @param resourceId the resource ID
     * @return An updated instance of this builder with the managed identity resource id set as specified.
     */
    public AzureApplicationCredentialBuilder managedIdentityResourceId(String resourceId) {
        this.managedIdentityResourceId = resourceId;
        return this;
    }

    /**
     * Specifies the ExecutorService to be used to execute the authentication requests.
     * Developer is responsible for maintaining the lifecycle of the ExecutorService.
     *
     * <p>
     * If this is not configured, the {@link ForkJoinPool#commonPool() common fork join pool} is used, which is
     * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication
     * requests might starve and setting up this executor service should be considered.
     * </p>
     *
     * <p> The executor service can be safely shut down if the TokenCredential is no longer being used by the
     * Azure SDK clients and should be shut down before the application exits. </p>
     *
     * @param executorService the executor service to use for executing authentication requests.
     * @return An updated instance of this builder with the executor service set as specified.
     */
    public AzureApplicationCredentialBuilder executorService(ExecutorService executorService) {
        this.identityClientOptions.setExecutorService(executorService);
        return this;
    }

    /**
     * Creates new {@link AzureApplicationCredential} with the configured options set.
     * @return a {@link AzureApplicationCredential} with the current configurations.
     * @throws IllegalStateException if clientId and resourceId are both set.
     */
    public AzureApplicationCredential build() {
        if (managedIdentityClientId != null && managedIdentityResourceId != null) {
            throw LOGGER.logExceptionAsError(
                new IllegalStateException("Only one of managedIdentityClientId and managedIdentityResourceId can be specified."));
        }
        return new AzureApplicationCredential(getCredentialsChain());
    }

    // Builds the fixed credential chain: environment credential first, then managed identity.
    private ArrayList<TokenCredential> getCredentialsChain() {
        ArrayList<TokenCredential> output = new ArrayList<>(2);
        output.add(new EnvironmentCredential(identityClientOptions));
        output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId,
            identityClientOptions));
        return output;
    }
}
👍
/**
 * Acquires an access token via AKS token exchange.
 *
 * @param request the token request context carrying the desired scopes
 * @return a Mono emitting the {@link AccessToken}, or an error if no client id is configured
 */
public Mono<AccessToken> authenticate(TokenRequestContext request) {
    // Token exchange requires a client id; delegate to the identity client when present.
    if (this.getClientId() != null) {
        return identityClient.authenticateWithExchangeToken(request);
    }
    String message = "The client id is not configured via 'AZURE_CLIENT_ID' environment variable or through"
        + " the credential builder. Please ensure client id is provided to authenticate via token exchange"
        + " in AKS environment.";
    return Mono.error(LOGGER.logExceptionAsError(new IllegalStateException(message)));
}
return identityClient.authenticateWithExchangeToken(request);
/**
 * Acquires an access token via AKS token exchange.
 *
 * @param request the token request context carrying the desired scopes
 * @return a Mono emitting the {@link AccessToken}, or an error if no client id is configured
 */
public Mono<AccessToken> authenticate(TokenRequestContext request) {
    // Token exchange requires a client id; delegate to the identity client when present.
    if (this.getClientId() != null) {
        return identityClient.authenticateWithExchangeToken(request);
    }
    String message = "The client id is not configured via 'AZURE_CLIENT_ID' environment variable or through"
        + " the credential builder. Please ensure client id is provided to authenticate via token exchange"
        + " in AKS environment.";
    return Mono.error(LOGGER.logExceptionAsError(new IllegalStateException(message)));
}
class AksExchangeTokenCredential extends ManagedIdentityServiceCredential { private static final ClientLogger LOGGER = new ClientLogger(AksExchangeTokenCredential.class); /** * Creates an instance of AksExchangeTokenCredential. * * @param clientId the client id of user assigned or system assigned identity. * @param identityClient the identity client to acquire a token with. */ AksExchangeTokenCredential(String clientId, IdentityClient identityClient) { super(clientId, identityClient, "AZURE AKS TOKEN EXCHANGE"); } @Override }
class AksExchangeTokenCredential extends ManagedIdentityServiceCredential { private static final ClientLogger LOGGER = new ClientLogger(AksExchangeTokenCredential.class); /** * Creates an instance of AksExchangeTokenCredential. * * @param clientId the client id of user assigned or system assigned identity. * @param identityClient the identity client to acquire a token with. */ AksExchangeTokenCredential(String clientId, IdentityClient identityClient) { super(clientId, identityClient, "AZURE AKS TOKEN EXCHANGE"); } @Override }
Does `!isInCreateMode()` necessarily mean the resource is in update mode? If so, an explicitly named `isInUpdateMode()` check would read more clearly.
/**
 * Sets the end-of-life date of the gallery image.
 *
 * @param endOfLifeDate the end-of-life date
 * @return this gallery image object, for chaining
 */
public GalleryImageImpl withEndOfLifeDate(OffsetDateTime endOfLifeDate) {
    innerModel().withEndOfLifeDate(endOfLifeDate);
    // When updating an existing image, mirror the change onto the update payload too.
    if (!isInCreateMode()) {
        galleryImageUpdate.withEndOfLifeDate(endOfLifeDate);
    }
    return this;
}
if (!isInCreateMode()) {
/**
 * Sets the end-of-life date of the gallery image.
 *
 * @param endOfLifeDate the end-of-life date
 * @return this gallery image object, for chaining
 */
public GalleryImageImpl withEndOfLifeDate(OffsetDateTime endOfLifeDate) {
    this.innerModel().withEndOfLifeDate(endOfLifeDate);
    // NOTE(review): sibling setters gate on !isInCreateMode(); confirm isInUpdateMode()
    // exists on the enclosing class and is equivalent.
    if (isInUpdateMode()) {
        this.galleryImageUpdate.withEndOfLifeDate(endOfLifeDate);
    }
    return this;
}
/**
 * Implementation for {@link GalleryImage} and its create/update fluent stages.
 *
 * <p>Fix: the original block contained a duplicated {@code @Override} annotation where the
 * {@code withEndOfLifeDate} setter had been removed (a compile error — duplicate annotations
 * are not permitted); the method is restored below.</p>
 */
class GalleryImageImpl extends CreatableUpdatableImpl<GalleryImage, GalleryImageInner, GalleryImageImpl>
    implements GalleryImage, GalleryImage.Definition, GalleryImage.Update {
    private final ComputeManager manager;
    private String resourceGroupName;
    private String galleryName;
    private String galleryImageName;
    // Update payload; allocated in update() and populated by the withXxx setters
    // whenever the image is not in create mode.
    private GalleryImageUpdate galleryImageUpdate;

    GalleryImageImpl(String name, ComputeManager manager) {
        super(name, new GalleryImageInner());
        this.manager = manager;
        this.galleryImageName = name;
    }

    GalleryImageImpl(GalleryImageInner inner, ComputeManager manager) {
        super(inner.name(), inner);
        this.manager = manager;
        this.galleryImageName = inner.name();
        // Parse the parent resource names out of the ARM resource id.
        this.resourceGroupName = getValueFromIdByName(inner.id(), "resourceGroups");
        this.galleryName = getValueFromIdByName(inner.id(), "galleries");
        this.galleryImageName = getValueFromIdByName(inner.id(), "images");
    }

    @Override
    public Mono<GalleryImageVersion> getVersionAsync(String versionName) {
        return this.manager()
            .galleryImageVersions()
            .getByGalleryImageAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, versionName);
    }

    @Override
    public GalleryImageVersion getVersion(String versionName) {
        return this.manager()
            .galleryImageVersions()
            .getByGalleryImage(this.resourceGroupName, this.galleryName, this.galleryImageName, versionName);
    }

    @Override
    public PagedFlux<GalleryImageVersion> listVersionsAsync() {
        return this.manager()
            .galleryImageVersions()
            .listByGalleryImageAsync(this.resourceGroupName, this.galleryName, this.galleryImageName);
    }

    @Override
    public PagedIterable<GalleryImageVersion> listVersions() {
        return this.manager()
            .galleryImageVersions()
            .listByGalleryImage(this.resourceGroupName, this.galleryName, this.galleryImageName);
    }

    @Override
    public ComputeManager manager() {
        return this.manager;
    }

    @Override
    public Mono<GalleryImage> createResourceAsync() {
        return manager()
            .serviceClient()
            .getGalleryImages()
            .createOrUpdateAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, this.innerModel())
            .map(innerToFluentMap(this));
    }

    @Override
    public GalleryImageImpl update() {
        // Start a fresh update payload for this update flow.
        this.galleryImageUpdate = new GalleryImageUpdate();
        return super.update();
    }

    @Override
    public Mono<GalleryImage> updateResourceAsync() {
        // Required properties must always be present on the update payload.
        this.galleryImageUpdate
            .withOsState(innerModel().osState())
            .withOsType(innerModel().osType())
            .withIdentifier(innerModel().identifier());
        return manager()
            .serviceClient()
            .getGalleryImages()
            .updateAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, this.galleryImageUpdate)
            .map(innerToFluentMap(this));
    }

    @Override
    protected Mono<GalleryImageInner> getInnerAsync() {
        return manager()
            .serviceClient()
            .getGalleryImages()
            .getAsync(this.resourceGroupName, this.galleryName, this.galleryImageName);
    }

    @Override
    public boolean isInCreateMode() {
        // A resource that has no id yet has not been created on the service.
        return this.innerModel().id() == null;
    }

    @Override
    public String description() {
        return this.innerModel().description();
    }

    @Override
    public List<DiskSkuTypes> unsupportedDiskTypes() {
        if (this.innerModel().disallowed() == null || this.innerModel().disallowed().diskTypes() == null) {
            return Collections.unmodifiableList(new ArrayList<DiskSkuTypes>());
        } else {
            List<DiskSkuTypes> diskTypes = new ArrayList<DiskSkuTypes>();
            for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) {
                diskTypes.add(DiskSkuTypes.fromStorageAccountType(DiskStorageAccountTypes.fromString(diskTypeStr)));
            }
            return Collections.unmodifiableList(diskTypes);
        }
    }

    @Override
    public Disallowed disallowed() {
        return this.innerModel().disallowed();
    }

    @Override
    public OffsetDateTime endOfLifeDate() {
        return this.innerModel().endOfLifeDate();
    }

    @Override
    public String eula() {
        return this.innerModel().eula();
    }

    @Override
    public String id() {
        return this.innerModel().id();
    }

    @Override
    public GalleryImageIdentifier identifier() {
        return this.innerModel().identifier();
    }

    @Override
    public String location() {
        return this.innerModel().location();
    }

    @Override
    public String name() {
        return this.innerModel().name();
    }

    @Override
    public OperatingSystemStateTypes osState() {
        return this.innerModel().osState();
    }

    @Override
    public OperatingSystemTypes osType() {
        return this.innerModel().osType();
    }

    @Override
    public String privacyStatementUri() {
        return this.innerModel().privacyStatementUri();
    }

    @Override
    public String provisioningState() {
        // NOTE(review): throws NPE if provisioningState is null; confirm the service
        // always populates it before this accessor is used.
        return this.innerModel().provisioningState().toString();
    }

    @Override
    public ImagePurchasePlan purchasePlan() {
        return this.innerModel().purchasePlan();
    }

    @Override
    public RecommendedMachineConfiguration recommendedVirtualMachineConfiguration() {
        return this.innerModel().recommended();
    }

    @Override
    public String releaseNoteUri() {
        return this.innerModel().releaseNoteUri();
    }

    @Override
    public Map<String, String> tags() {
        return this.innerModel().tags();
    }

    @Override
    public String type() {
        return this.innerModel().type();
    }

    @Override
    public GalleryImageImpl withExistingGallery(String resourceGroupName, String galleryName) {
        this.resourceGroupName = resourceGroupName;
        this.galleryName = galleryName;
        return this;
    }

    @Override
    public GalleryImageImpl withExistingGallery(Gallery gallery) {
        this.resourceGroupName = gallery.resourceGroupName();
        this.galleryName = gallery.name();
        return this;
    }

    @Override
    public GalleryImageImpl withLocation(String location) {
        this.innerModel().withLocation(location);
        return this;
    }

    @Override
    public GalleryImageImpl withLocation(Region location) {
        this.innerModel().withLocation(location.toString());
        return this;
    }

    @Override
    public GalleryImageImpl withIdentifier(GalleryImageIdentifier identifier) {
        this.innerModel().withIdentifier(identifier);
        return this;
    }

    @Override
    public GalleryImageImpl withIdentifier(String publisher, String offer, String sku) {
        this.innerModel()
            .withIdentifier(new GalleryImageIdentifier().withPublisher(publisher).withOffer(offer).withSku(sku));
        return this;
    }

    @Override
    public GalleryImageImpl withGeneralizedWindows() {
        return this.withWindows(OperatingSystemStateTypes.GENERALIZED);
    }

    @Override
    public GalleryImageImpl withGeneralizedLinux() {
        return this.withLinux(OperatingSystemStateTypes.GENERALIZED);
    }

    @Override
    public GalleryImageImpl withWindows(OperatingSystemStateTypes osState) {
        this.innerModel().withOsType(OperatingSystemTypes.WINDOWS).withOsState(osState);
        return this;
    }

    @Override
    public GalleryImageImpl withLinux(OperatingSystemStateTypes osState) {
        this.innerModel().withOsType(OperatingSystemTypes.LINUX).withOsState(osState);
        return this;
    }

    @Override
    public GalleryImageImpl withDescription(String description) {
        this.innerModel().withDescription(description);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withDescription(description);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withUnsupportedDiskType(DiskSkuTypes diskType) {
        if (this.innerModel().disallowed() == null) {
            this.innerModel().withDisallowed(new Disallowed());
        }
        if (this.innerModel().disallowed().diskTypes() == null) {
            this.innerModel().disallowed().withDiskTypes(new ArrayList<String>());
        }
        // De-duplicate: add only when not already present (case-insensitive).
        boolean found = false;
        String newDiskTypeStr = diskType.toString();
        for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) {
            if (diskTypeStr.equalsIgnoreCase(newDiskTypeStr)) {
                found = true;
                break;
            }
        }
        if (!found) {
            this.innerModel().disallowed().diskTypes().add(diskType.toString());
        }
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withUnsupportedDiskTypes(List<DiskSkuTypes> diskTypes) {
        if (this.innerModel().disallowed() == null) {
            this.innerModel().withDisallowed(new Disallowed());
        }
        // Replaces any existing disallowed disk types wholesale.
        this.innerModel().disallowed().withDiskTypes(new ArrayList<String>());
        for (DiskSkuTypes diskType : diskTypes) {
            this.innerModel().disallowed().diskTypes().add(diskType.toString());
        }
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withoutUnsupportedDiskType(DiskSkuTypes diskType) {
        if (this.innerModel().disallowed() != null && this.innerModel().disallowed().diskTypes() != null) {
            int foundIndex = -1;
            int i = 0;
            String diskTypeToRemove = diskType.toString();
            for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) {
                if (diskTypeStr.equalsIgnoreCase(diskTypeToRemove)) {
                    foundIndex = i;
                    break;
                }
                i++;
            }
            if (foundIndex != -1) {
                this.innerModel().disallowed().diskTypes().remove(foundIndex);
            }
            if (!isInCreateMode()) {
                this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed());
            }
        }
        return this;
    }

    @Override
    public GalleryImageImpl withDisallowed(Disallowed disallowed) {
        this.innerModel().withDisallowed(disallowed);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withDisallowed(disallowed);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withEndOfLifeDate(OffsetDateTime endOfLifeDate) {
        this.innerModel().withEndOfLifeDate(endOfLifeDate);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withEndOfLifeDate(endOfLifeDate);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withEula(String eula) {
        this.innerModel().withEula(eula);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withEula(eula);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withOsState(OperatingSystemStateTypes osState) {
        this.innerModel().withOsState(osState);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withOsState(osState);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withPrivacyStatementUri(String privacyStatementUri) {
        this.innerModel().withPrivacyStatementUri(privacyStatementUri);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withPrivacyStatementUri(privacyStatementUri);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withPurchasePlan(String name, String publisher, String product) {
        return this
            .withPurchasePlan(new ImagePurchasePlan().withName(name).withPublisher(publisher).withProduct(product));
    }

    @Override
    public GalleryImageImpl withPurchasePlan(ImagePurchasePlan purchasePlan) {
        this.innerModel().withPurchasePlan(purchasePlan);
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMinimumCPUsCountForVirtualMachine(int minCount) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().vCPUs() == null) {
            this.innerModel().recommended().withVCPUs(new ResourceRange());
        }
        this.innerModel().recommended().vCPUs().withMin(minCount);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMaximumCPUsCountForVirtualMachine(int maxCount) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().vCPUs() == null) {
            this.innerModel().recommended().withVCPUs(new ResourceRange());
        }
        this.innerModel().recommended().vCPUs().withMax(maxCount);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedCPUsCountForVirtualMachine(int minCount, int maxCount) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        // Replaces any previously configured vCPU range.
        this.innerModel().recommended().withVCPUs(new ResourceRange());
        this.innerModel().recommended().vCPUs().withMin(minCount);
        this.innerModel().recommended().vCPUs().withMax(maxCount);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMinimumMemoryForVirtualMachine(int minMB) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().memory() == null) {
            this.innerModel().recommended().withMemory(new ResourceRange());
        }
        this.innerModel().recommended().memory().withMin(minMB);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMaximumMemoryForVirtualMachine(int maxMB) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().memory() == null) {
            this.innerModel().recommended().withMemory(new ResourceRange());
        }
        this.innerModel().recommended().memory().withMax(maxMB);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMemoryForVirtualMachine(int minMB, int maxMB) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        // Replaces any previously configured memory range.
        this.innerModel().recommended().withMemory(new ResourceRange());
        this.innerModel().recommended().memory().withMin(minMB);
        this.innerModel().recommended().memory().withMax(maxMB);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedConfigurationForVirtualMachine(
        RecommendedMachineConfiguration recommendedConfig) {
        this.innerModel().withRecommended(recommendedConfig);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withRecommended(recommendedConfig);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withReleaseNoteUri(String releaseNoteUri) {
        this.innerModel().withReleaseNoteUri(releaseNoteUri);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withReleaseNoteUri(releaseNoteUri);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withTags(Map<String, String> tags) {
        this.innerModel().withTags(tags);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withTags(tags);
        }
        return this;
    }

    // Extracts the path segment immediately following the given name in an ARM resource id,
    // e.g. getValueFromIdByName(".../resourceGroups/rg1/...", "resourceGroups") -> "rg1".
    private static String getValueFromIdByName(String id, String name) {
        if (id == null) {
            return null;
        }
        Iterable<String> iterable = Arrays.asList(id.split("/"));
        Iterator<String> itr = iterable.iterator();
        while (itr.hasNext()) {
            String part = itr.next();
            if (part != null && !part.trim().isEmpty()) {
                if (part.equalsIgnoreCase(name)) {
                    if (itr.hasNext()) {
                        return itr.next();
                    } else {
                        return null;
                    }
                }
            }
        }
        return null;
    }
}
/**
 * Implementation for {@link GalleryImage} and its definition/update fluent flows.
 *
 * <p>Two payloads are kept in sync: the inner model (sent verbatim on create) and
 * {@link GalleryImageUpdate} (the PATCH-style payload sent on update). Every fluent
 * setter writes the inner model and, when an update flow is active, mirrors the
 * value into the update payload.</p>
 */
class GalleryImageImpl extends CreatableUpdatableImpl<GalleryImage, GalleryImageInner, GalleryImageImpl>
    implements GalleryImage, GalleryImage.Definition, GalleryImage.Update {

    private final ComputeManager manager;
    // Parent coordinates; parsed from the resource id when wrapping an existing inner model.
    private String resourceGroupName;
    private String galleryName;
    private String galleryImageName;
    // Non-null only while an update flow is in progress (initialized by update()).
    private GalleryImageUpdate galleryImageUpdate;

    GalleryImageImpl(String name, ComputeManager manager) {
        super(name, new GalleryImageInner());
        this.manager = manager;
        this.galleryImageName = name;
    }

    GalleryImageImpl(GalleryImageInner inner, ComputeManager manager) {
        super(inner.name(), inner);
        this.manager = manager;
        this.galleryImageName = inner.name();
        this.resourceGroupName = getValueFromIdByName(inner.id(), "resourceGroups");
        this.galleryName = getValueFromIdByName(inner.id(), "galleries");
        this.galleryImageName = getValueFromIdByName(inner.id(), "images");
    }

    @Override
    public Mono<GalleryImageVersion> getVersionAsync(String versionName) {
        return this.manager()
            .galleryImageVersions()
            .getByGalleryImageAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, versionName);
    }

    @Override
    public GalleryImageVersion getVersion(String versionName) {
        return this.manager()
            .galleryImageVersions()
            .getByGalleryImage(this.resourceGroupName, this.galleryName, this.galleryImageName, versionName);
    }

    @Override
    public PagedFlux<GalleryImageVersion> listVersionsAsync() {
        return this.manager()
            .galleryImageVersions()
            .listByGalleryImageAsync(this.resourceGroupName, this.galleryName, this.galleryImageName);
    }

    @Override
    public PagedIterable<GalleryImageVersion> listVersions() {
        return this.manager()
            .galleryImageVersions()
            .listByGalleryImage(this.resourceGroupName, this.galleryName, this.galleryImageName);
    }

    @Override
    public ComputeManager manager() {
        return this.manager;
    }

    @Override
    public Mono<GalleryImage> createResourceAsync() {
        return manager()
            .serviceClient()
            .getGalleryImages()
            .createOrUpdateAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, this.innerModel())
            .map(innerToFluentMap(this));
    }

    @Override
    public GalleryImageImpl update() {
        // Start a fresh PATCH payload; subsequent setters mirror into it.
        this.galleryImageUpdate = new GalleryImageUpdate();
        return super.update();
    }

    @Override
    public Mono<GalleryImage> updateResourceAsync() {
        // These properties are required on the update payload even when unchanged.
        this.galleryImageUpdate
            .withOsState(innerModel().osState())
            .withOsType(innerModel().osType())
            .withIdentifier(innerModel().identifier());
        return manager()
            .serviceClient()
            .getGalleryImages()
            .updateAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, this.galleryImageUpdate)
            .map(innerToFluentMap(this));
    }

    @Override
    protected Mono<GalleryImageInner> getInnerAsync() {
        return manager()
            .serviceClient()
            .getGalleryImages()
            .getAsync(this.resourceGroupName, this.galleryName, this.galleryImageName);
    }

    @Override
    public boolean isInCreateMode() {
        // The resource id is assigned by the service, so its absence means "not created yet".
        return this.innerModel().id() == null;
    }

    // ----- read-only views over the inner model -----

    @Override
    public String description() {
        return this.innerModel().description();
    }

    @Override
    public List<DiskSkuTypes> unsupportedDiskTypes() {
        if (this.innerModel().disallowed() == null || this.innerModel().disallowed().diskTypes() == null) {
            return Collections.unmodifiableList(new ArrayList<DiskSkuTypes>());
        } else {
            List<DiskSkuTypes> diskTypes = new ArrayList<DiskSkuTypes>();
            for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) {
                diskTypes.add(DiskSkuTypes.fromStorageAccountType(DiskStorageAccountTypes.fromString(diskTypeStr)));
            }
            return Collections.unmodifiableList(diskTypes);
        }
    }

    @Override
    public Disallowed disallowed() {
        return this.innerModel().disallowed();
    }

    @Override
    public OffsetDateTime endOfLifeDate() {
        return this.innerModel().endOfLifeDate();
    }

    @Override
    public String eula() {
        return this.innerModel().eula();
    }

    @Override
    public String id() {
        return this.innerModel().id();
    }

    @Override
    public GalleryImageIdentifier identifier() {
        return this.innerModel().identifier();
    }

    @Override
    public String location() {
        return this.innerModel().location();
    }

    @Override
    public String name() {
        return this.innerModel().name();
    }

    @Override
    public OperatingSystemStateTypes osState() {
        return this.innerModel().osState();
    }

    @Override
    public OperatingSystemTypes osType() {
        return this.innerModel().osType();
    }

    @Override
    public String privacyStatementUri() {
        return this.innerModel().privacyStatementUri();
    }

    @Override
    public String provisioningState() {
        // NOTE(review): throws NPE when provisioningState() is null — presumably always
        // populated by the service for a fetched resource; confirm before hardening.
        return this.innerModel().provisioningState().toString();
    }

    @Override
    public ImagePurchasePlan purchasePlan() {
        return this.innerModel().purchasePlan();
    }

    @Override
    public RecommendedMachineConfiguration recommendedVirtualMachineConfiguration() {
        return this.innerModel().recommended();
    }

    @Override
    public String releaseNoteUri() {
        return this.innerModel().releaseNoteUri();
    }

    @Override
    public Map<String, String> tags() {
        return this.innerModel().tags();
    }

    @Override
    public String type() {
        return this.innerModel().type();
    }

    // ----- fluent setters -----

    @Override
    public GalleryImageImpl withExistingGallery(String resourceGroupName, String galleryName) {
        this.resourceGroupName = resourceGroupName;
        this.galleryName = galleryName;
        return this;
    }

    @Override
    public GalleryImageImpl withExistingGallery(Gallery gallery) {
        this.resourceGroupName = gallery.resourceGroupName();
        this.galleryName = gallery.name();
        return this;
    }

    @Override
    public GalleryImageImpl withLocation(String location) {
        this.innerModel().withLocation(location);
        return this;
    }

    @Override
    public GalleryImageImpl withLocation(Region location) {
        this.innerModel().withLocation(location.toString());
        return this;
    }

    @Override
    public GalleryImageImpl withIdentifier(GalleryImageIdentifier identifier) {
        this.innerModel().withIdentifier(identifier);
        return this;
    }

    @Override
    public GalleryImageImpl withIdentifier(String publisher, String offer, String sku) {
        this.innerModel()
            .withIdentifier(new GalleryImageIdentifier().withPublisher(publisher).withOffer(offer).withSku(sku));
        return this;
    }

    @Override
    public GalleryImageImpl withGeneralizedWindows() {
        return this.withWindows(OperatingSystemStateTypes.GENERALIZED);
    }

    @Override
    public GalleryImageImpl withGeneralizedLinux() {
        return this.withLinux(OperatingSystemStateTypes.GENERALIZED);
    }

    @Override
    public GalleryImageImpl withWindows(OperatingSystemStateTypes osState) {
        this.innerModel().withOsType(OperatingSystemTypes.WINDOWS).withOsState(osState);
        return this;
    }

    @Override
    public GalleryImageImpl withLinux(OperatingSystemStateTypes osState) {
        this.innerModel().withOsType(OperatingSystemTypes.LINUX).withOsState(osState);
        return this;
    }

    @Override
    public GalleryImageImpl withDescription(String description) {
        this.innerModel().withDescription(description);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withDescription(description);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withUnsupportedDiskType(DiskSkuTypes diskType) {
        if (this.innerModel().disallowed() == null) {
            this.innerModel().withDisallowed(new Disallowed());
        }
        if (this.innerModel().disallowed().diskTypes() == null) {
            this.innerModel().disallowed().withDiskTypes(new ArrayList<String>());
        }
        // De-duplicate case-insensitively before appending.
        boolean found = false;
        String newDiskTypeStr = diskType.toString();
        for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) {
            if (diskTypeStr.equalsIgnoreCase(newDiskTypeStr)) {
                found = true;
                break;
            }
        }
        if (!found) {
            this.innerModel().disallowed().diskTypes().add(diskType.toString());
        }
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withUnsupportedDiskTypes(List<DiskSkuTypes> diskTypes) {
        if (this.innerModel().disallowed() == null) {
            this.innerModel().withDisallowed(new Disallowed());
        }
        // Replaces (not merges) any previously configured disallowed disk types.
        this.innerModel().disallowed().withDiskTypes(new ArrayList<String>());
        for (DiskSkuTypes diskType : diskTypes) {
            this.innerModel().disallowed().diskTypes().add(diskType.toString());
        }
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withoutUnsupportedDiskType(DiskSkuTypes diskType) {
        if (this.innerModel().disallowed() != null && this.innerModel().disallowed().diskTypes() != null) {
            int foundIndex = -1;
            int i = 0;
            String diskTypeToRemove = diskType.toString();
            for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) {
                if (diskTypeStr.equalsIgnoreCase(diskTypeToRemove)) {
                    foundIndex = i;
                    break;
                }
                i++;
            }
            if (foundIndex != -1) {
                this.innerModel().disallowed().diskTypes().remove(foundIndex);
            }
            if (isInUpdateMode()) {
                this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed());
            }
        }
        return this;
    }

    @Override
    public GalleryImageImpl withDisallowed(Disallowed disallowed) {
        this.innerModel().withDisallowed(disallowed);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withDisallowed(disallowed);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withEndOfLifeDate(OffsetDateTime endOfLifeDate) {
        this.innerModel().withEndOfLifeDate(endOfLifeDate);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withEndOfLifeDate(endOfLifeDate);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withEula(String eula) {
        this.innerModel().withEula(eula);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withEula(eula);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withOsState(OperatingSystemStateTypes osState) {
        this.innerModel().withOsState(osState);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withOsState(osState);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withPrivacyStatementUri(String privacyStatementUri) {
        this.innerModel().withPrivacyStatementUri(privacyStatementUri);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withPrivacyStatementUri(privacyStatementUri);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withPurchasePlan(String name, String publisher, String product) {
        return this.withPurchasePlan(
            new ImagePurchasePlan().withName(name).withPublisher(publisher).withProduct(product));
    }

    @Override
    public GalleryImageImpl withPurchasePlan(ImagePurchasePlan purchasePlan) {
        this.innerModel().withPurchasePlan(purchasePlan);
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMinimumCPUsCountForVirtualMachine(int minCount) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().vCPUs() == null) {
            this.innerModel().recommended().withVCPUs(new ResourceRange());
        }
        this.innerModel().recommended().vCPUs().withMin(minCount);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMaximumCPUsCountForVirtualMachine(int maxCount) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().vCPUs() == null) {
            this.innerModel().recommended().withVCPUs(new ResourceRange());
        }
        this.innerModel().recommended().vCPUs().withMax(maxCount);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedCPUsCountForVirtualMachine(int minCount, int maxCount) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        // Replaces any previously configured vCPU range.
        this.innerModel().recommended().withVCPUs(new ResourceRange());
        this.innerModel().recommended().vCPUs().withMin(minCount);
        this.innerModel().recommended().vCPUs().withMax(maxCount);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMinimumMemoryForVirtualMachine(int minMB) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().memory() == null) {
            this.innerModel().recommended().withMemory(new ResourceRange());
        }
        this.innerModel().recommended().memory().withMin(minMB);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMaximumMemoryForVirtualMachine(int maxMB) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().memory() == null) {
            this.innerModel().recommended().withMemory(new ResourceRange());
        }
        this.innerModel().recommended().memory().withMax(maxMB);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMemoryForVirtualMachine(int minMB, int maxMB) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        // Replaces any previously configured memory range.
        this.innerModel().recommended().withMemory(new ResourceRange());
        this.innerModel().recommended().memory().withMin(minMB);
        this.innerModel().recommended().memory().withMax(maxMB);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedConfigurationForVirtualMachine(
        RecommendedMachineConfiguration recommendedConfig) {
        this.innerModel().withRecommended(recommendedConfig);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withRecommended(recommendedConfig);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withReleaseNoteUri(String releaseNoteUri) {
        this.innerModel().withReleaseNoteUri(releaseNoteUri);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withReleaseNoteUri(releaseNoteUri);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withTags(Map<String, String> tags) {
        this.innerModel().withTags(tags);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withTags(tags);
        }
        return this;
    }

    /** True when an update (not a create) flow is in progress. */
    private boolean isInUpdateMode() {
        return !isInCreateMode();
    }

    /**
     * Extracts the path segment that immediately follows the segment {@code name}
     * in an ARM resource id, e.g. {@code .../galleries/<value>/...}.
     *
     * @param id the ARM resource id (may be null)
     * @param name the segment name to look for (case-insensitive)
     * @return the following segment, or null when absent
     */
    private static String getValueFromIdByName(String id, String name) {
        if (id == null) {
            return null;
        }
        Iterable<String> iterable = Arrays.asList(id.split("/"));
        Iterator<String> itr = iterable.iterator();
        while (itr.hasNext()) {
            String part = itr.next();
            if (part != null && !part.trim().isEmpty()) {
                if (part.equalsIgnoreCase(name)) {
                    if (itr.hasNext()) {
                        return itr.next();
                    } else {
                        return null;
                    }
                }
            }
        }
        return null;
    }
}
Yes, it's the same.
/**
 * Specifies the end-of-life date of the gallery image.
 *
 * @param endOfLifeDate the date after which the image is considered retired
 * @return this fluent instance
 */
public GalleryImageImpl withEndOfLifeDate(OffsetDateTime endOfLifeDate) {
    this.innerModel().withEndOfLifeDate(endOfLifeDate);
    // When an update flow is active, mirror the value into the PATCH payload too.
    boolean updating = !isInCreateMode();
    if (updating) {
        this.galleryImageUpdate.withEndOfLifeDate(endOfLifeDate);
    }
    return this;
}
if (!isInCreateMode()) {
/**
 * Specifies the end-of-life date of the gallery image.
 *
 * @param endOfLifeDate the date after which the image is considered retired
 * @return this fluent instance
 */
public GalleryImageImpl withEndOfLifeDate(OffsetDateTime endOfLifeDate) {
    GalleryImageInner model = this.innerModel();
    model.withEndOfLifeDate(endOfLifeDate);
    // In update mode the same value must also land in the PATCH payload.
    if (isInUpdateMode()) {
        this.galleryImageUpdate.withEndOfLifeDate(endOfLifeDate);
    }
    return this;
}
/**
 * Implementation for {@link GalleryImage} and its definition/update fluent flows.
 *
 * <p>Two payloads are kept in sync: the inner model (sent verbatim on create) and
 * {@link GalleryImageUpdate} (the PATCH-style payload sent on update). Every fluent
 * setter writes the inner model and, when not in create mode, mirrors the value
 * into the update payload.</p>
 */
class GalleryImageImpl extends CreatableUpdatableImpl<GalleryImage, GalleryImageInner, GalleryImageImpl>
    implements GalleryImage, GalleryImage.Definition, GalleryImage.Update {

    private final ComputeManager manager;
    // Parent coordinates; parsed from the resource id when wrapping an existing inner model.
    private String resourceGroupName;
    private String galleryName;
    private String galleryImageName;
    // Non-null only while an update flow is in progress (initialized by update()).
    private GalleryImageUpdate galleryImageUpdate;

    GalleryImageImpl(String name, ComputeManager manager) {
        super(name, new GalleryImageInner());
        this.manager = manager;
        this.galleryImageName = name;
    }

    GalleryImageImpl(GalleryImageInner inner, ComputeManager manager) {
        super(inner.name(), inner);
        this.manager = manager;
        this.galleryImageName = inner.name();
        this.resourceGroupName = getValueFromIdByName(inner.id(), "resourceGroups");
        this.galleryName = getValueFromIdByName(inner.id(), "galleries");
        this.galleryImageName = getValueFromIdByName(inner.id(), "images");
    }

    @Override
    public Mono<GalleryImageVersion> getVersionAsync(String versionName) {
        return this.manager()
            .galleryImageVersions()
            .getByGalleryImageAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, versionName);
    }

    @Override
    public GalleryImageVersion getVersion(String versionName) {
        return this.manager()
            .galleryImageVersions()
            .getByGalleryImage(this.resourceGroupName, this.galleryName, this.galleryImageName, versionName);
    }

    @Override
    public PagedFlux<GalleryImageVersion> listVersionsAsync() {
        return this.manager()
            .galleryImageVersions()
            .listByGalleryImageAsync(this.resourceGroupName, this.galleryName, this.galleryImageName);
    }

    @Override
    public PagedIterable<GalleryImageVersion> listVersions() {
        return this.manager()
            .galleryImageVersions()
            .listByGalleryImage(this.resourceGroupName, this.galleryName, this.galleryImageName);
    }

    @Override
    public ComputeManager manager() {
        return this.manager;
    }

    @Override
    public Mono<GalleryImage> createResourceAsync() {
        return manager()
            .serviceClient()
            .getGalleryImages()
            .createOrUpdateAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, this.innerModel())
            .map(innerToFluentMap(this));
    }

    @Override
    public GalleryImageImpl update() {
        // Start a fresh PATCH payload; subsequent setters mirror into it.
        this.galleryImageUpdate = new GalleryImageUpdate();
        return super.update();
    }

    @Override
    public Mono<GalleryImage> updateResourceAsync() {
        // These properties are required on the update payload even when unchanged.
        this.galleryImageUpdate
            .withOsState(innerModel().osState())
            .withOsType(innerModel().osType())
            .withIdentifier(innerModel().identifier());
        return manager()
            .serviceClient()
            .getGalleryImages()
            .updateAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, this.galleryImageUpdate)
            .map(innerToFluentMap(this));
    }

    @Override
    protected Mono<GalleryImageInner> getInnerAsync() {
        return manager()
            .serviceClient()
            .getGalleryImages()
            .getAsync(this.resourceGroupName, this.galleryName, this.galleryImageName);
    }

    @Override
    public boolean isInCreateMode() {
        // The resource id is assigned by the service, so its absence means "not created yet".
        return this.innerModel().id() == null;
    }

    // ----- read-only views over the inner model -----

    @Override
    public String description() {
        return this.innerModel().description();
    }

    @Override
    public List<DiskSkuTypes> unsupportedDiskTypes() {
        if (this.innerModel().disallowed() == null || this.innerModel().disallowed().diskTypes() == null) {
            return Collections.unmodifiableList(new ArrayList<DiskSkuTypes>());
        } else {
            List<DiskSkuTypes> diskTypes = new ArrayList<DiskSkuTypes>();
            for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) {
                diskTypes.add(DiskSkuTypes.fromStorageAccountType(DiskStorageAccountTypes.fromString(diskTypeStr)));
            }
            return Collections.unmodifiableList(diskTypes);
        }
    }

    @Override
    public Disallowed disallowed() {
        return this.innerModel().disallowed();
    }

    @Override
    public OffsetDateTime endOfLifeDate() {
        return this.innerModel().endOfLifeDate();
    }

    @Override
    public String eula() {
        return this.innerModel().eula();
    }

    @Override
    public String id() {
        return this.innerModel().id();
    }

    @Override
    public GalleryImageIdentifier identifier() {
        return this.innerModel().identifier();
    }

    @Override
    public String location() {
        return this.innerModel().location();
    }

    @Override
    public String name() {
        return this.innerModel().name();
    }

    @Override
    public OperatingSystemStateTypes osState() {
        return this.innerModel().osState();
    }

    @Override
    public OperatingSystemTypes osType() {
        return this.innerModel().osType();
    }

    @Override
    public String privacyStatementUri() {
        return this.innerModel().privacyStatementUri();
    }

    @Override
    public String provisioningState() {
        // NOTE(review): throws NPE when provisioningState() is null — presumably always
        // populated by the service for a fetched resource; confirm before hardening.
        return this.innerModel().provisioningState().toString();
    }

    @Override
    public ImagePurchasePlan purchasePlan() {
        return this.innerModel().purchasePlan();
    }

    @Override
    public RecommendedMachineConfiguration recommendedVirtualMachineConfiguration() {
        return this.innerModel().recommended();
    }

    @Override
    public String releaseNoteUri() {
        return this.innerModel().releaseNoteUri();
    }

    @Override
    public Map<String, String> tags() {
        return this.innerModel().tags();
    }

    @Override
    public String type() {
        return this.innerModel().type();
    }

    // ----- fluent setters -----

    @Override
    public GalleryImageImpl withExistingGallery(String resourceGroupName, String galleryName) {
        this.resourceGroupName = resourceGroupName;
        this.galleryName = galleryName;
        return this;
    }

    @Override
    public GalleryImageImpl withExistingGallery(Gallery gallery) {
        this.resourceGroupName = gallery.resourceGroupName();
        this.galleryName = gallery.name();
        return this;
    }

    @Override
    public GalleryImageImpl withLocation(String location) {
        this.innerModel().withLocation(location);
        return this;
    }

    @Override
    public GalleryImageImpl withLocation(Region location) {
        this.innerModel().withLocation(location.toString());
        return this;
    }

    @Override
    public GalleryImageImpl withIdentifier(GalleryImageIdentifier identifier) {
        this.innerModel().withIdentifier(identifier);
        return this;
    }

    @Override
    public GalleryImageImpl withIdentifier(String publisher, String offer, String sku) {
        this.innerModel()
            .withIdentifier(new GalleryImageIdentifier().withPublisher(publisher).withOffer(offer).withSku(sku));
        return this;
    }

    @Override
    public GalleryImageImpl withGeneralizedWindows() {
        return this.withWindows(OperatingSystemStateTypes.GENERALIZED);
    }

    @Override
    public GalleryImageImpl withGeneralizedLinux() {
        return this.withLinux(OperatingSystemStateTypes.GENERALIZED);
    }

    @Override
    public GalleryImageImpl withWindows(OperatingSystemStateTypes osState) {
        this.innerModel().withOsType(OperatingSystemTypes.WINDOWS).withOsState(osState);
        return this;
    }

    @Override
    public GalleryImageImpl withLinux(OperatingSystemStateTypes osState) {
        this.innerModel().withOsType(OperatingSystemTypes.LINUX).withOsState(osState);
        return this;
    }

    @Override
    public GalleryImageImpl withDescription(String description) {
        this.innerModel().withDescription(description);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withDescription(description);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withUnsupportedDiskType(DiskSkuTypes diskType) {
        if (this.innerModel().disallowed() == null) {
            this.innerModel().withDisallowed(new Disallowed());
        }
        if (this.innerModel().disallowed().diskTypes() == null) {
            this.innerModel().disallowed().withDiskTypes(new ArrayList<String>());
        }
        // De-duplicate case-insensitively before appending.
        boolean found = false;
        String newDiskTypeStr = diskType.toString();
        for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) {
            if (diskTypeStr.equalsIgnoreCase(newDiskTypeStr)) {
                found = true;
                break;
            }
        }
        if (!found) {
            this.innerModel().disallowed().diskTypes().add(diskType.toString());
        }
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withUnsupportedDiskTypes(List<DiskSkuTypes> diskTypes) {
        if (this.innerModel().disallowed() == null) {
            this.innerModel().withDisallowed(new Disallowed());
        }
        // Replaces (not merges) any previously configured disallowed disk types.
        this.innerModel().disallowed().withDiskTypes(new ArrayList<String>());
        for (DiskSkuTypes diskType : diskTypes) {
            this.innerModel().disallowed().diskTypes().add(diskType.toString());
        }
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withoutUnsupportedDiskType(DiskSkuTypes diskType) {
        if (this.innerModel().disallowed() != null && this.innerModel().disallowed().diskTypes() != null) {
            int foundIndex = -1;
            int i = 0;
            String diskTypeToRemove = diskType.toString();
            for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) {
                if (diskTypeStr.equalsIgnoreCase(diskTypeToRemove)) {
                    foundIndex = i;
                    break;
                }
                i++;
            }
            if (foundIndex != -1) {
                this.innerModel().disallowed().diskTypes().remove(foundIndex);
            }
            if (!isInCreateMode()) {
                this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed());
            }
        }
        return this;
    }

    @Override
    public GalleryImageImpl withDisallowed(Disallowed disallowed) {
        this.innerModel().withDisallowed(disallowed);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withDisallowed(disallowed);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withEndOfLifeDate(OffsetDateTime endOfLifeDate) {
        this.innerModel().withEndOfLifeDate(endOfLifeDate);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withEndOfLifeDate(endOfLifeDate);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withEula(String eula) {
        this.innerModel().withEula(eula);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withEula(eula);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withOsState(OperatingSystemStateTypes osState) {
        this.innerModel().withOsState(osState);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withOsState(osState);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withPrivacyStatementUri(String privacyStatementUri) {
        this.innerModel().withPrivacyStatementUri(privacyStatementUri);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withPrivacyStatementUri(privacyStatementUri);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withPurchasePlan(String name, String publisher, String product) {
        return this.withPurchasePlan(
            new ImagePurchasePlan().withName(name).withPublisher(publisher).withProduct(product));
    }

    @Override
    public GalleryImageImpl withPurchasePlan(ImagePurchasePlan purchasePlan) {
        this.innerModel().withPurchasePlan(purchasePlan);
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMinimumCPUsCountForVirtualMachine(int minCount) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().vCPUs() == null) {
            this.innerModel().recommended().withVCPUs(new ResourceRange());
        }
        this.innerModel().recommended().vCPUs().withMin(minCount);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMaximumCPUsCountForVirtualMachine(int maxCount) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().vCPUs() == null) {
            this.innerModel().recommended().withVCPUs(new ResourceRange());
        }
        this.innerModel().recommended().vCPUs().withMax(maxCount);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedCPUsCountForVirtualMachine(int minCount, int maxCount) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        // Replaces any previously configured vCPU range.
        this.innerModel().recommended().withVCPUs(new ResourceRange());
        this.innerModel().recommended().vCPUs().withMin(minCount);
        this.innerModel().recommended().vCPUs().withMax(maxCount);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMinimumMemoryForVirtualMachine(int minMB) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().memory() == null) {
            this.innerModel().recommended().withMemory(new ResourceRange());
        }
        this.innerModel().recommended().memory().withMin(minMB);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMaximumMemoryForVirtualMachine(int maxMB) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().memory() == null) {
            this.innerModel().recommended().withMemory(new ResourceRange());
        }
        this.innerModel().recommended().memory().withMax(maxMB);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMemoryForVirtualMachine(int minMB, int maxMB) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        // Replaces any previously configured memory range.
        this.innerModel().recommended().withMemory(new ResourceRange());
        this.innerModel().recommended().memory().withMin(minMB);
        this.innerModel().recommended().memory().withMax(maxMB);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedConfigurationForVirtualMachine(
        RecommendedMachineConfiguration recommendedConfig) {
        this.innerModel().withRecommended(recommendedConfig);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withRecommended(recommendedConfig);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withReleaseNoteUri(String releaseNoteUri) {
        this.innerModel().withReleaseNoteUri(releaseNoteUri);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withReleaseNoteUri(releaseNoteUri);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withTags(Map<String, String> tags) {
        this.innerModel().withTags(tags);
        if (!isInCreateMode()) {
            this.galleryImageUpdate.withTags(tags);
        }
        return this;
    }

    /**
     * Extracts the path segment that immediately follows the segment {@code name}
     * in an ARM resource id, e.g. {@code .../galleries/<value>/...}.
     *
     * @param id the ARM resource id (may be null)
     * @param name the segment name to look for (case-insensitive)
     * @return the following segment, or null when absent
     */
    private static String getValueFromIdByName(String id, String name) {
        if (id == null) {
            return null;
        }
        Iterable<String> iterable = Arrays.asList(id.split("/"));
        Iterator<String> itr = iterable.iterator();
        while (itr.hasNext()) {
            String part = itr.next();
            if (part != null && !part.trim().isEmpty()) {
                if (part.equalsIgnoreCase(name)) {
                    if (itr.hasNext()) {
                        return itr.next();
                    } else {
                        return null;
                    }
                }
            }
        }
        return null;
    }
}
/**
 * Implementation for {@link GalleryImage} and its definition/update fluent flows.
 *
 * <p>Create-mode setters write only into the inner model; when the image already exists
 * (update mode) the same setters additionally mirror the change into {@code galleryImageUpdate},
 * which is the PATCH payload sent by {@link #updateResourceAsync()}.</p>
 */
class GalleryImageImpl extends CreatableUpdatableImpl<GalleryImage, GalleryImageInner, GalleryImageImpl>
    implements GalleryImage, GalleryImage.Definition, GalleryImage.Update {
    private final ComputeManager manager;
    // Parsed from the inner model's resource id, or set via withExistingGallery(...).
    private String resourceGroupName;
    private String galleryName;
    private String galleryImageName;
    // Sparse PATCH payload; created lazily in update().
    private GalleryImageUpdate galleryImageUpdate;

    GalleryImageImpl(String name, ComputeManager manager) {
        super(name, new GalleryImageInner());
        this.manager = manager;
        this.galleryImageName = name;
    }

    GalleryImageImpl(GalleryImageInner inner, ComputeManager manager) {
        super(inner.name(), inner);
        this.manager = manager;
        this.galleryImageName = inner.name();
        this.resourceGroupName = getValueFromIdByName(inner.id(), "resourceGroups");
        this.galleryName = getValueFromIdByName(inner.id(), "galleries");
        this.galleryImageName = getValueFromIdByName(inner.id(), "images");
    }

    @Override
    public Mono<GalleryImageVersion> getVersionAsync(String versionName) {
        return this.manager()
            .galleryImageVersions()
            .getByGalleryImageAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, versionName);
    }

    @Override
    public GalleryImageVersion getVersion(String versionName) {
        return this.manager()
            .galleryImageVersions()
            .getByGalleryImage(this.resourceGroupName, this.galleryName, this.galleryImageName, versionName);
    }

    @Override
    public PagedFlux<GalleryImageVersion> listVersionsAsync() {
        return this.manager()
            .galleryImageVersions()
            .listByGalleryImageAsync(this.resourceGroupName, this.galleryName, this.galleryImageName);
    }

    @Override
    public PagedIterable<GalleryImageVersion> listVersions() {
        return this.manager()
            .galleryImageVersions()
            .listByGalleryImage(this.resourceGroupName, this.galleryName, this.galleryImageName);
    }

    @Override
    public ComputeManager manager() {
        return this.manager;
    }

    @Override
    public Mono<GalleryImage> createResourceAsync() {
        return manager()
            .serviceClient()
            .getGalleryImages()
            .createOrUpdateAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, this.innerModel())
            .map(innerToFluentMap(this));
    }

    @Override
    public GalleryImageImpl update() {
        this.galleryImageUpdate = new GalleryImageUpdate();
        return super.update();
    }

    @Override
    public Mono<GalleryImage> updateResourceAsync() {
        // osState, osType and identifier are required on the update payload even when unchanged.
        this.galleryImageUpdate
            .withOsState(innerModel().osState())
            .withOsType(innerModel().osType())
            .withIdentifier(innerModel().identifier());
        return manager()
            .serviceClient()
            .getGalleryImages()
            .updateAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, this.galleryImageUpdate)
            .map(innerToFluentMap(this));
    }

    @Override
    protected Mono<GalleryImageInner> getInnerAsync() {
        return manager()
            .serviceClient()
            .getGalleryImages()
            .getAsync(this.resourceGroupName, this.galleryName, this.galleryImageName);
    }

    @Override
    public boolean isInCreateMode() {
        // A resource that has never round-tripped to the service has no id yet.
        return this.innerModel().id() == null;
    }

    @Override
    public String description() {
        return this.innerModel().description();
    }

    @Override
    public List<DiskSkuTypes> unsupportedDiskTypes() {
        // Always return an unmodifiable snapshot; never expose the inner list.
        if (this.innerModel().disallowed() == null || this.innerModel().disallowed().diskTypes() == null) {
            return Collections.unmodifiableList(new ArrayList<DiskSkuTypes>());
        } else {
            List<DiskSkuTypes> diskTypes = new ArrayList<DiskSkuTypes>();
            for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) {
                diskTypes.add(DiskSkuTypes.fromStorageAccountType(DiskStorageAccountTypes.fromString(diskTypeStr)));
            }
            return Collections.unmodifiableList(diskTypes);
        }
    }

    @Override
    public Disallowed disallowed() {
        return this.innerModel().disallowed();
    }

    @Override
    public OffsetDateTime endOfLifeDate() {
        return this.innerModel().endOfLifeDate();
    }

    @Override
    public String eula() {
        return this.innerModel().eula();
    }

    @Override
    public String id() {
        return this.innerModel().id();
    }

    @Override
    public GalleryImageIdentifier identifier() {
        return this.innerModel().identifier();
    }

    @Override
    public String location() {
        return this.innerModel().location();
    }

    @Override
    public String name() {
        return this.innerModel().name();
    }

    @Override
    public OperatingSystemStateTypes osState() {
        return this.innerModel().osState();
    }

    @Override
    public OperatingSystemTypes osType() {
        return this.innerModel().osType();
    }

    @Override
    public String privacyStatementUri() {
        return this.innerModel().privacyStatementUri();
    }

    @Override
    public String provisioningState() {
        // FIX: guard against NPE — the service may not have reported a provisioning state yet.
        final Object state = this.innerModel().provisioningState();
        return state == null ? null : state.toString();
    }

    @Override
    public ImagePurchasePlan purchasePlan() {
        return this.innerModel().purchasePlan();
    }

    @Override
    public RecommendedMachineConfiguration recommendedVirtualMachineConfiguration() {
        return this.innerModel().recommended();
    }

    @Override
    public String releaseNoteUri() {
        return this.innerModel().releaseNoteUri();
    }

    @Override
    public Map<String, String> tags() {
        return this.innerModel().tags();
    }

    @Override
    public String type() {
        return this.innerModel().type();
    }

    @Override
    public GalleryImageImpl withExistingGallery(String resourceGroupName, String galleryName) {
        this.resourceGroupName = resourceGroupName;
        this.galleryName = galleryName;
        return this;
    }

    @Override
    public GalleryImageImpl withExistingGallery(Gallery gallery) {
        this.resourceGroupName = gallery.resourceGroupName();
        this.galleryName = gallery.name();
        return this;
    }

    @Override
    public GalleryImageImpl withLocation(String location) {
        this.innerModel().withLocation(location);
        return this;
    }

    @Override
    public GalleryImageImpl withLocation(Region location) {
        this.innerModel().withLocation(location.toString());
        return this;
    }

    @Override
    public GalleryImageImpl withIdentifier(GalleryImageIdentifier identifier) {
        this.innerModel().withIdentifier(identifier);
        return this;
    }

    @Override
    public GalleryImageImpl withIdentifier(String publisher, String offer, String sku) {
        this.innerModel()
            .withIdentifier(new GalleryImageIdentifier().withPublisher(publisher).withOffer(offer).withSku(sku));
        return this;
    }

    @Override
    public GalleryImageImpl withGeneralizedWindows() {
        return this.withWindows(OperatingSystemStateTypes.GENERALIZED);
    }

    @Override
    public GalleryImageImpl withGeneralizedLinux() {
        return this.withLinux(OperatingSystemStateTypes.GENERALIZED);
    }

    @Override
    public GalleryImageImpl withWindows(OperatingSystemStateTypes osState) {
        this.innerModel().withOsType(OperatingSystemTypes.WINDOWS).withOsState(osState);
        return this;
    }

    @Override
    public GalleryImageImpl withLinux(OperatingSystemStateTypes osState) {
        this.innerModel().withOsType(OperatingSystemTypes.LINUX).withOsState(osState);
        return this;
    }

    @Override
    public GalleryImageImpl withDescription(String description) {
        this.innerModel().withDescription(description);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withDescription(description);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withUnsupportedDiskType(DiskSkuTypes diskType) {
        if (this.innerModel().disallowed() == null) {
            this.innerModel().withDisallowed(new Disallowed());
        }
        if (this.innerModel().disallowed().diskTypes() == null) {
            this.innerModel().disallowed().withDiskTypes(new ArrayList<String>());
        }
        // De-duplicate case-insensitively before appending.
        boolean found = false;
        String newDiskTypeStr = diskType.toString();
        for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) {
            if (diskTypeStr.equalsIgnoreCase(newDiskTypeStr)) {
                found = true;
                break;
            }
        }
        if (!found) {
            this.innerModel().disallowed().diskTypes().add(diskType.toString());
        }
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withUnsupportedDiskTypes(List<DiskSkuTypes> diskTypes) {
        if (this.innerModel().disallowed() == null) {
            this.innerModel().withDisallowed(new Disallowed());
        }
        // Replaces (not merges) any previously configured disallowed disk types.
        this.innerModel().disallowed().withDiskTypes(new ArrayList<String>());
        for (DiskSkuTypes diskType : diskTypes) {
            this.innerModel().disallowed().diskTypes().add(diskType.toString());
        }
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withoutUnsupportedDiskType(DiskSkuTypes diskType) {
        if (this.innerModel().disallowed() != null && this.innerModel().disallowed().diskTypes() != null) {
            int foundIndex = -1;
            int i = 0;
            String diskTypeToRemove = diskType.toString();
            for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) {
                if (diskTypeStr.equalsIgnoreCase(diskTypeToRemove)) {
                    foundIndex = i;
                    break;
                }
                i++;
            }
            if (foundIndex != -1) {
                this.innerModel().disallowed().diskTypes().remove(foundIndex);
            }
            if (isInUpdateMode()) {
                this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed());
            }
        }
        return this;
    }

    @Override
    public GalleryImageImpl withDisallowed(Disallowed disallowed) {
        this.innerModel().withDisallowed(disallowed);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withDisallowed(disallowed);
        }
        return this;
    }

    // FIX: the original source had a duplicated "@Override @Override" here, which does not compile
    // (@Override is not a repeatable annotation). It likely marks a method dropped during a merge —
    // verify against upstream whether a withEndOfLifeDate(...) overload belongs here.
    @Override
    public GalleryImageImpl withEula(String eula) {
        this.innerModel().withEula(eula);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withEula(eula);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withOsState(OperatingSystemStateTypes osState) {
        this.innerModel().withOsState(osState);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withOsState(osState);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withPrivacyStatementUri(String privacyStatementUri) {
        this.innerModel().withPrivacyStatementUri(privacyStatementUri);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withPrivacyStatementUri(privacyStatementUri);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withPurchasePlan(String name, String publisher, String product) {
        return this
            .withPurchasePlan(new ImagePurchasePlan().withName(name).withPublisher(publisher).withProduct(product));
    }

    @Override
    public GalleryImageImpl withPurchasePlan(ImagePurchasePlan purchasePlan) {
        this.innerModel().withPurchasePlan(purchasePlan);
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMinimumCPUsCountForVirtualMachine(int minCount) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().vCPUs() == null) {
            this.innerModel().recommended().withVCPUs(new ResourceRange());
        }
        this.innerModel().recommended().vCPUs().withMin(minCount);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMaximumCPUsCountForVirtualMachine(int maxCount) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().vCPUs() == null) {
            this.innerModel().recommended().withVCPUs(new ResourceRange());
        }
        this.innerModel().recommended().vCPUs().withMax(maxCount);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedCPUsCountForVirtualMachine(int minCount, int maxCount) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        // Setting both bounds resets any previously configured vCPU range.
        this.innerModel().recommended().withVCPUs(new ResourceRange());
        this.innerModel().recommended().vCPUs().withMin(minCount);
        this.innerModel().recommended().vCPUs().withMax(maxCount);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMinimumMemoryForVirtualMachine(int minMB) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().memory() == null) {
            this.innerModel().recommended().withMemory(new ResourceRange());
        }
        this.innerModel().recommended().memory().withMin(minMB);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMaximumMemoryForVirtualMachine(int maxMB) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        if (this.innerModel().recommended().memory() == null) {
            this.innerModel().recommended().withMemory(new ResourceRange());
        }
        this.innerModel().recommended().memory().withMax(maxMB);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedMemoryForVirtualMachine(int minMB, int maxMB) {
        if (this.innerModel().recommended() == null) {
            this.innerModel().withRecommended(new RecommendedMachineConfiguration());
        }
        // Setting both bounds resets any previously configured memory range.
        this.innerModel().recommended().withMemory(new ResourceRange());
        this.innerModel().recommended().memory().withMin(minMB);
        this.innerModel().recommended().memory().withMax(maxMB);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withRecommended(this.innerModel().recommended());
        }
        return this;
    }

    @Override
    public GalleryImageImpl withRecommendedConfigurationForVirtualMachine(
        RecommendedMachineConfiguration recommendedConfig) {
        this.innerModel().withRecommended(recommendedConfig);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withRecommended(recommendedConfig);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withReleaseNoteUri(String releaseNoteUri) {
        this.innerModel().withReleaseNoteUri(releaseNoteUri);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withReleaseNoteUri(releaseNoteUri);
        }
        return this;
    }

    @Override
    public GalleryImageImpl withTags(Map<String, String> tags) {
        this.innerModel().withTags(tags);
        if (isInUpdateMode()) {
            this.galleryImageUpdate.withTags(tags);
        }
        return this;
    }

    private boolean isInUpdateMode() {
        return !isInCreateMode();
    }

    /**
     * Extracts the id segment that immediately follows the given name in an ARM resource id,
     * e.g. name "galleries" on ".../galleries/myGallery/..." yields "myGallery".
     */
    private static String getValueFromIdByName(String id, String name) {
        if (id == null) {
            return null;
        }
        Iterable<String> iterable = Arrays.asList(id.split("/"));
        Iterator<String> itr = iterable.iterator();
        while (itr.hasNext()) {
            String part = itr.next();
            if (part != null && !part.trim().isEmpty()) {
                if (part.equalsIgnoreCase(name)) {
                    if (itr.hasNext()) {
                        return itr.next();
                    } else {
                        return null;
                    }
                }
            }
        }
        return null;
    }
}
Because if we want to close the CBS channel here, we can't use `repeat().takeUntilOther(shutdownSignal)` to stop requesting a new CBS channel after the connection is closed. Here I try to use `AmqpChannelProcessor.dispose()` to mark the processor as disposed and close its channels. But this needs a double check on why `flatMap(channel -> channel.closeAsync())` was used previously.
/**
 * Disposes of the connection with the given shutdown signal.
 *
 * <p>Close order: (1) CBS channel and management nodes concurrently (errors delayed so both run),
 * (2) emit the shutdown signal, (3) schedule the proton-j close work on the reactor dispatcher,
 * then (4) complete only when {@code isClosedMono} signals the teardown finished.</p>
 *
 * @param shutdownSignal Signal describing why the connection is closing.
 * @return A mono that completes when the connection and its resources are fully closed.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Deferred until subscription so the signal is emitted at its place in the close sequence,
    // not eagerly when closeAsync is invoked.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // It is possible that another one was already emitted, so it's all good.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        // dispose() marks the processor terminated so it stops requesting new CBS channels,
        // in addition to closing the current one (see review comment above).
        cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                // Dispatcher could not schedule the work; fall back to closing on this thread.
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);

                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                logger.info("Could not schedule closeConnection work. Manually disposing.");

                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError: run CBS and management closes even if one of them errors.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal. ")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose());
/**
 * Disposes of the connection with the given shutdown signal.
 *
 * <p>Emits the shutdown signal immediately, then closes the CBS channel and management nodes
 * concurrently (errors delayed so both run), then schedules the proton-j close work on the reactor
 * dispatcher, and completes only when {@code isClosedMono} signals the teardown finished.</p>
 *
 * @param shutdownSignal Signal describing why the connection is closing.
 * @return A mono that completes when the connection and its resources are fully closed.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

    final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

    if (result.isFailure()) {
        // It is possible that another signal was already emitted; this is non-fatal.
        addShutdownSignal(logger.atInfo(), shutdownSignal)
            .addKeyValue(EMIT_RESULT_KEY, result)
            .log("Unable to emit shutdown signal.");
    }

    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        // Closes the current CBS channel; note the processor itself keeps requesting channels
        // (its upstream uses repeat()) — see review comment for the proposed dispose() approach.
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                // Dispatcher could not schedule the work; fall back to closing on this thread.
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);

                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                logger.info("Could not schedule closeConnection work. Manually disposing.");

                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError: run CBS and management closes even if one of them errors.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
/**
 * An AMQP connection backed by a proton-j {@link Reactor}. Owns the connection lifecycle, its
 * sessions, the claims-based-security (CBS) channel used for authorization, and per-entity
 * management nodes. Close is idempotent and coordinated through {@code isDisposed} /
 * {@code isClosedMono}.
 */
class ReactorConnection implements AmqpConnection {

    private static final String CBS_SESSION_NAME = "cbs-session";
    private static final String CBS_ADDRESS = "$cbs";
    private static final String CBS_LINK_NAME = "cbs";

    private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
    private static final String MANAGEMENT_ADDRESS = "$management";
    private static final String MANAGEMENT_LINK_NAME = "mgmt";

    private final ClientLogger logger;
    // Active sessions keyed by name; entries remove themselves on completion or error.
    private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>();
    private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>();

    // Whoever flips this false -> true owns running the close sequence (getAndSet guards re-entry).
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one();
    private final Flux<AmqpEndpointState> endpointStates;
    // Completes once closeConnectionWork has finished tearing everything down.
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();

    private final String connectionId;
    private final Mono<Connection> connectionMono;
    private final ConnectionHandler handler;
    private final ReactorHandlerProvider handlerProvider;
    private final TokenManagerProvider tokenManagerProvider;
    private final MessageSerializer messageSerializer;
    private final ConnectionOptions connectionOptions;
    private final ReactorProvider reactorProvider;
    private final AmqpRetryPolicy retryPolicy;
    private final SenderSettleMode senderSettleMode;
    private final ReceiverSettleMode receiverSettleMode;
    private final Duration operationTimeout;
    private final Composite subscriptions;

    private ReactorExecutor executor;

    // volatile: created lazily under synchronization but read from reactive pipelines.
    private volatile ClaimsBasedSecurityChannel cbsChannel;
    private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor;
    private volatile Connection connection;

    /**
     * Creates a new AMQP connection that uses proton-j.
     *
     * @param connectionId Identifier for the connection.
     * @param connectionOptions A set of options used to create the AMQP connection.
     * @param reactorProvider Provides proton-j Reactor instances.
     * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
     * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node.
     * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}.
     * @param senderSettleMode to set as {@link SenderSettleMode} on sender.
     * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
     */
    public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider,
        ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider,
        MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
        ReceiverSettleMode receiverSettleMode) {
        this.connectionOptions = connectionOptions;
        this.reactorProvider = reactorProvider;
        this.connectionId = connectionId;
        this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId));
        this.handlerProvider = handlerProvider;
        this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider,
            "'tokenManagerProvider' cannot be null.");
        this.messageSerializer = messageSerializer;
        this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions);

        this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry());
        this.operationTimeout = connectionOptions.getRetry().getTryTimeout();
        this.senderSettleMode = senderSettleMode;
        this.receiverSettleMode = receiverSettleMode;

        // Lazily opens the connection and only emits it once the endpoint goes ACTIVE (or times out).
        this.connectionMono = Mono.fromCallable(this::getOrCreateConnection)
            .flatMap(reactorConnection -> {
                final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates()
                    .filter(state -> state == AmqpEndpointState.ACTIVE)
                    .next()
                    .timeout(operationTimeout, Mono.error(new AmqpException(true, String.format(
                        "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId,
                        operationTimeout), handler.getErrorContext())));
                return activeEndpoint.thenReturn(reactorConnection);
            })
            .doOnError(error -> {
                // getAndSet ensures only the first failure triggers a close.
                if (isDisposed.getAndSet(true)) {
                    logger.verbose("Connection was already disposed: Error occurred while connection was starting.",
                        error);
                } else {
                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Error occurred while connection was starting. Error: " + error)).subscribe();
                }
            });

        this.endpointStates = this.handler.getEndpointStates()
            .takeUntilOther(shutdownSignalSink.asMono())
            .map(state -> {
                logger.verbose("State {}", state);
                return AmqpEndpointStateUtil.getConnectionState(state);
            })
            .onErrorResume(error -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to error.");
                    return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error));
                } else {
                    return Mono.error(error);
                }
            })
            .doOnComplete(() -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to connection close.");

                    closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe();
                }
            })
            // Replay the latest state to late subscribers.
            .cache(1);

        // Keeps the endpoint-state pipeline hot for the lifetime of the connection.
        this.subscriptions = Disposables.composite(this.endpointStates.subscribe());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Flux<AmqpEndpointState> getEndpointStates() {
        return endpointStates;
    }

    /**
     * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed.
     *
     * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is
     *     closed.
     */
    @Override
    public Flux<AmqpShutdownSignal> getShutdownSignals() {
        return shutdownSignalSink.asMono().cache().flux();
    }

    @Override
    public Mono<AmqpManagementNode> getManagementNode(String entityPath) {
        return Mono.defer(() -> {
            if (isDisposed()) {
                return Mono.error(logger.atError()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .log(Exceptions.propagate(new IllegalStateException(
                        "Connection is disposed. Cannot get management instance."))));
            }

            final AmqpManagementNode existing = managementNodes.get(entityPath);
            if (existing != null) {
                return Mono.just(existing);
            }

            final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(),
                connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope())
                .getTokenManager(getClaimsBasedSecurityNode(), entityPath);

            return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> {
                if (current != null) {
                    logger.info("A management node exists already, returning it.");

                    // This compute lost the race with a concurrent caller; release the redundant token manager.
                    tokenManager.close();
                    return current;
                }

                final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
                final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
                final String address = entityPath + "/" + MANAGEMENT_ADDRESS;

                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .addKeyValue("address", address)
                    .log("Creating management node.");

                final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel =
                    createRequestResponseChannel(sessionName, linkName, address);
                return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath,
                    tokenManager);
            }));
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() {
        return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode()));
    }

    @Override
    public String getId() {
        return connectionId;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getFullyQualifiedNamespace() {
        return handler.getHostname();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getMaxFrameSize() {
        return handler.getMaxFrameSize();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Map<String, Object> getConnectionProperties() {
        return handler.getConnectionProperties();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<AmqpSession> createSession(String sessionName) {
        return connectionMono.map(connection -> {
            final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> {
                final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId,
                    getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout());
                final Session session = connection.session();

                BaseHandler.setHandler(session, sessionHandler);
                final AmqpSession amqpSession = createSession(key, session, sessionHandler);
                // Self-cleaning: remove the session from the map when it errors or completes,
                // unless the whole connection is already being disposed.
                final Disposable subscription = amqpSession.getEndpointStates()
                    .subscribe(state -> {
                    }, error -> {
                        if (isDisposed.get()) {
                            return;
                        }

                        logger.atInfo()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Error occurred. Removing and disposing session", error);
                        removeSession(key);
                    }, () -> {
                        if (isDisposed.get()) {
                            return;
                        }

                        logger.atVerbose()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Complete. Removing and disposing session.");
                        removeSession(key);
                    });

                return new SessionSubscription(amqpSession, subscription);
            });

            return sessionSubscription;
        }).flatMap(sessionSubscription -> {
            // Only hand the session back once it reports ACTIVE (or fail fast on timeout).
            final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates()
                .filter(state -> state == AmqpEndpointState.ACTIVE)
                .next()
                .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(new AmqpException(true,
                    String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.",
                        connectionId, sessionName), handler.getErrorContext())));

            return activeSession.thenReturn(sessionSubscription.getSession());
        });
    }

    /**
     * Creates a new AMQP session with the given parameters.
     *
     * @param sessionName Name of the AMQP session.
     * @param session The reactor session associated with this session.
     * @param handler Session handler for the reactor session.
     *
     * @return A new instance of AMQP session.
     */
    protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
        return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider,
            getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean removeSession(String sessionName) {
        if (sessionName == null) {
            return false;
        }

        final SessionSubscription removed = sessionMap.remove(sessionName);

        if (removed != null) {
            removed.dispose();
        }

        return removed != null;
    }

    @Override
    public boolean isDisposed() {
        return isDisposed.get();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void dispose() {
        // Allow twice the single-operation timeout for the full teardown.
        final Duration timeout = operationTimeout.plus(operationTimeout);
        closeAsync().block(timeout);
    }

    /**
     * Gets the active AMQP connection for this instance.
     *
     * @return The AMQP connection.
     *
     * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given
     *     {@link AmqpRetryOptions#getTryTimeout() try timeout}.
     */
    protected Mono<Connection> getReactorConnection() {
        return connectionMono;
    }

    /**
     * Creates a bidirectional link between the message broker and the client.
     *
     * @param sessionName Name of the session.
     * @param linkName Name of the link.
     * @param entityPath Address to the message broker.
     *
     * @return A new {@link RequestResponseChannel} to communicate with the message broker.
     */
    protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName,
        String linkName, String entityPath) {
        Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        // repeat(): re-create the channel whenever the previous one terminates; the channel
        // processor below decides when to stop resubscribing.
        final Flux<RequestResponseChannel> createChannel = createSession(sessionName)
            .cast(ReactorSession.class)
            .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName,
                entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
                messageSerializer, senderSettleMode, receiverSettleMode))
            .doOnNext(e -> {
                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .log("Emitting new response channel.");
            })
            .repeat();

        Map<String, Object> loggingContext = createContextWithConnectionId(connectionId);
        loggingContext.put(ENTITY_PATH_KEY, entityPath);

        return createChannel
            .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(),
                channel -> channel.getEndpointStates(), retryPolicy, loggingContext));
    }

    @Override
    public Mono<Void> closeAsync() {
        if (isDisposed.getAndSet(true)) {
            logger.verbose("Connection was already closed. Not disposing again.");
            return isClosedMono.asMono();
        }

        return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client."));
    }

    // Runs on the reactor dispatcher thread (or inline as a fallback); closes the proton-j
    // connection, waits for sessions and the executor, then completes isClosedMono.
    private synchronized void closeConnectionWork() {
        if (connection == null) {
            isClosedMono.emitEmpty((signalType, emitResult) -> {
                addSignalTypeAndResult(logger.atInfo(), signalType, emitResult)
                    .log("Unable to complete closeMono.");

                return false;
            });

            return;
        }

        connection.close();
        handler.close();

        final ArrayList<Mono<Void>> closingSessions = new ArrayList<>();
        sessionMap.values().forEach(link -> closingSessions.add(link.isClosed()));

        // executor is assigned under this object's monitor; defer + synchronized re-reads it safely.
        final Mono<Void> closedExecutor = executor != null ? Mono.defer(() -> {
            synchronized (this) {
                logger.info("Closing executor.");
                return executor.closeAsync();
            }
        }) : Mono.empty();

        // Swallow the session-close timeout: proceed with teardown regardless.
        final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions)
            .timeout(operationTimeout)
            .onErrorResume(error -> {
                logger.info("Timed out waiting for all sessions to close.");
                return Mono.empty();
            })
            .then(closedExecutor)
            .then(Mono.fromRunnable(() -> {
                isClosedMono.emitEmpty((signalType, result) -> {
                    addSignalTypeAndResult(logger.atWarning(), signalType, result)
                        .log("Unable to emit connection closed signal.");
                    return false;
                });

                subscriptions.dispose();
            }));

        subscriptions.add(closeSessionAndExecutorMono.subscribe());
    }

    private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
        if (cbsChannel == null) {
            logger.info("Setting CBS channel.");
            cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS);
            cbsChannel = new ClaimsBasedSecurityChannel(
                cbsChannelProcessor,
                connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(),
                connectionOptions.getRetry());
        }

        return cbsChannel;
    }

    private synchronized Connection getOrCreateConnection() throws IOException {
        if (connection == null) {
            logger.atInfo()
                .addKeyValue(HOSTNAME_KEY, handler.getHostname())
                .addKeyValue("port", handler.getProtocolPort())
                .log("Creating and starting connection.");

            final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
            connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);

            final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler();

            // Wait period between executor polls: the smaller of the server-busy wait and half the try timeout.
            final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2);
            final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0
                ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided;
            final Scheduler scheduler = Schedulers.newSingle("reactor-executor");
            executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler,
                pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace());

            final Mono<Void> executorCloseMono = Mono.defer(() -> {
                synchronized (this) {
                    return executor.closeAsync();
                }
            });

            // Tie the executor's lifetime to the dispatcher's shutdown signal.
            reactorProvider.getReactorDispatcher().getShutdownSignal()
                .flatMap(signal -> {
                    reactorExceptionHandler.onConnectionShutdown(signal);
                    return executorCloseMono;
                })
                .onErrorResume(error -> {
                    reactorExceptionHandler.onConnectionError(error);
                    return executorCloseMono;
                })
                .subscribe();

            executor.start();
        }

        return connection;
    }

    // Routes reactor-level errors and shutdowns into this connection's close sequence (idempotently).
    private final class ReactorExceptionHandler extends AmqpExceptionHandler {
        private ReactorExceptionHandler() {
            super();
        }

        @Override
        public void onConnectionError(Throwable exception) {
            logger.atInfo()
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionError, Starting new reactor", exception);

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onReactorError: Disposing.");

                closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString()))
                    .subscribe();
            }
        }

        @Override
        void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionShutdown. Shutting down.");

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onConnectionShutdown: disposing.");

                closeAsync(shutdownSignal).subscribe();
            }
        }
    }

    // Pairs a session with the subscription that watches its endpoint states, so both are
    // released together exactly once.
    private static final class SessionSubscription {
        private final AtomicBoolean isDisposed = new AtomicBoolean();
        private final AmqpSession session;
        private final Disposable subscription;

        private SessionSubscription(AmqpSession session, Disposable subscription) {
            this.session = session;
            this.subscription = subscription;
        }

        private AmqpSession getSession() {
            return session;
        }

        private void dispose() {
            if (isDisposed.getAndSet(true)) {
                return;
            }

            if (session instanceof ReactorSession) {
                ((ReactorSession) session).closeAsync("Closing session.", null, true)
                    .subscribe();
            } else {
                session.dispose();
            }

            subscription.dispose();
        }

        private Mono<Void> isClosed() {
            if (session instanceof ReactorSession) {
                return ((ReactorSession) session).isClosed();
            } else {
                return Mono.empty();
            }
        }
    }
}
/**
 * An {@link AmqpConnection} implementation backed by a proton-j reactor.
 */
class ReactorConnection implements AmqpConnection {
    // Session/link name and address used for the claims-based-security ($cbs) node.
    private static final String CBS_SESSION_NAME = "cbs-session";
    private static final String CBS_ADDRESS = "$cbs";
    private static final String CBS_LINK_NAME = "cbs";

    // Session/link name suffixes and address segment used for per-entity management ($management) nodes.
    private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
    private static final String MANAGEMENT_ADDRESS = "$management";
    private static final String MANAGEMENT_LINK_NAME = "mgmt";

    private final ClientLogger logger;
    // Active sessions keyed by session name; an entry is disposed when it is removed.
    private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>();
    // Management nodes keyed by entity path.
    private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>();
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // One-shot sink carrying the reason the connection shut down.
    private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one();
    private final Flux<AmqpEndpointState> endpointStates;
    // Completes once the connection has fully closed.
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();

    private final String connectionId;
    private final Mono<Connection> connectionMono;
    private final ConnectionHandler handler;
    private final ReactorHandlerProvider handlerProvider;
    private final TokenManagerProvider tokenManagerProvider;
    private final MessageSerializer messageSerializer;
    private final ConnectionOptions connectionOptions;
    private final ReactorProvider reactorProvider;
    private final AmqpRetryPolicy retryPolicy;
    private final SenderSettleMode senderSettleMode;
    private final ReceiverSettleMode receiverSettleMode;
    private final Duration operationTimeout;
    private final Composite subscriptions;

    private ReactorExecutor executor;

    // Lazily created inside synchronized getOrCreateCBSNode(); volatile for safe reads elsewhere.
    private volatile ClaimsBasedSecurityChannel cbsChannel;
    private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor;
    // Lazily created inside synchronized getOrCreateConnection(); volatile for safe reads elsewhere.
    private volatile Connection connection;

    /**
     * Creates a new AMQP connection that uses proton-j.
     *
     * @param connectionId Identifier for the connection.
     * @param connectionOptions A set of options used to create the AMQP connection.
     * @param reactorProvider Provides proton-j Reactor instances.
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(() -> !this.isDisposed()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
Emitting the shutdown signal triggers closing of the sessions and their senders/receivers. Deferring the emission here ensures the sessions begin closing only after the CBS and management nodes have closed.
/**
 * Disposes of the connection. Ordering matters here: the CBS and management nodes are closed
 * first; only then is the shutdown signal emitted (which triggers closing of the sessions and
 * their senders/receivers), and finally the connection-close work is scheduled on the reactor
 * dispatcher.
 *
 * @param shutdownSignal Signal emitted to subscribers describing why the connection is closing.
 * @return A mono that completes when the connection has fully closed.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Wrapped in fromRunnable so the signal is emitted at subscription time, i.e. after the
    // CBS/management close operations below have completed.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);
        if (result.isFailure()) {
            // Best-effort: log and continue; the sink is one-shot so a second emission fails.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    // Dispose of the channel processor directly rather than requesting a channel from it.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                // Dispatcher is already shut down; fall back to closing on the calling thread.
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // Sequence: CBS + management nodes -> shutdown signal -> reactor close -> closed signal.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal. ")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
.then(emitShutDownSignalOperation.doFinally(signalType ->
/**
 * Disposes of the connection: closes the CBS and management nodes first, then emits the shutdown
 * signal (which triggers closing of the sessions and their senders/receivers), and finally
 * schedules the connection-close work on the reactor dispatcher.
 *
 * @param shutdownSignal Signal emitted to subscribers describing why the connection is closing.
 * @return A mono that completes when the connection has fully closed.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // FIX: previously the shutdown signal was emitted as an immediate side effect when this
    // method was invoked, before the CBS and management nodes were closed — tearing down
    // sessions those nodes still depended on. Deferring emission into a Mono and sequencing it
    // after the node-close operations preserves the intended shutdown order.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);
        if (result.isFailure()) {
            // Best-effort: log and continue; the sink is one-shot so a second emission fails.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    // FIX: dispose of the channel processor directly instead of flatMap-ing it for a channel to
    // close. Requesting a channel during shutdown could wait on an emission that never happens.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                // Dispatcher is already shut down; fall back to closing on the calling thread.
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // Sequence: CBS + management nodes -> shutdown signal -> reactor close -> closed signal.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); }) 
.doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
/**
 * An AMQP connection implemented on top of a proton-j {@link Reactor}. Owns the reactor executor,
 * the CBS (claims-based security) channel used for authorization, the management nodes, and all
 * AMQP sessions created from this connection. All teardown funnels through
 * {@code closeAsync(AmqpShutdownSignal)} so close happens exactly once.
 */
class ReactorConnection implements AmqpConnection {
    private static final String CBS_SESSION_NAME = "cbs-session";
    private static final String CBS_ADDRESS = "$cbs";
    private static final String CBS_LINK_NAME = "cbs";

    private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
    private static final String MANAGEMENT_ADDRESS = "$management";
    private static final String MANAGEMENT_LINK_NAME = "mgmt";

    private final ClientLogger logger;
    // Sessions keyed by name; SessionSubscription pairs each session with its state watcher.
    private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>();
    private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>();

    // Flips to true exactly once; every close path uses getAndSet(true) to claim the close.
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // One-shot sink: emits the shutdown signal that initiated this connection's close.
    private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one();
    private final Flux<AmqpEndpointState> endpointStates;
    // Completes when closeConnectionWork() has fully torn the connection down.
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();

    private final String connectionId;
    private final Mono<Connection> connectionMono;
    private final ConnectionHandler handler;
    private final ReactorHandlerProvider handlerProvider;
    private final TokenManagerProvider tokenManagerProvider;
    private final MessageSerializer messageSerializer;
    private final ConnectionOptions connectionOptions;
    private final ReactorProvider reactorProvider;
    private final AmqpRetryPolicy retryPolicy;
    private final SenderSettleMode senderSettleMode;
    private final ReceiverSettleMode receiverSettleMode;
    private final Duration operationTimeout;
    private final Composite subscriptions;

    // Assigned lazily in getOrCreateConnection(); closed via synchronized deferred Monos.
    private ReactorExecutor executor;

    private volatile ClaimsBasedSecurityChannel cbsChannel;
    private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor;
    private volatile Connection connection;

    /**
     * Creates a new AMQP connection that uses proton-j.
     *
     * @param connectionId Identifier for the connection.
     * @param connectionOptions A set of options used to create the AMQP connection.
     * @param reactorProvider Provides proton-j Reactor instances.
     * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
     * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node.
     * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}.
     * @param senderSettleMode to set as {@link SenderSettleMode} on sender.
     * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
     */
    public ReactorConnection(String connectionId, ConnectionOptions connectionOptions,
        ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider,
        TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer,
        SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) {

        this.connectionOptions = connectionOptions;
        this.reactorProvider = reactorProvider;
        this.connectionId = connectionId;
        this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId));
        this.handlerProvider = handlerProvider;
        this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider,
            "'tokenManagerProvider' cannot be null.");
        this.messageSerializer = messageSerializer;
        this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions);

        this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry());
        this.operationTimeout = connectionOptions.getRetry().getTryTimeout();
        this.senderSettleMode = senderSettleMode;
        this.receiverSettleMode = receiverSettleMode;

        // Lazily opens the connection on first subscription and only emits it once ACTIVE.
        // Mono.error with a supplier keeps exception creation lazy (no throwable built unless
        // the timeout actually fires).
        this.connectionMono = Mono.fromCallable(this::getOrCreateConnection)
            .flatMap(reactorConnection -> {
                final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates()
                    .filter(state -> state == AmqpEndpointState.ACTIVE)
                    .next()
                    .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format(
                        "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId,
                        operationTimeout), handler.getErrorContext())));
                return activeEndpoint.thenReturn(reactorConnection);
            })
            .doOnError(error -> {
                // A failed startup still needs a full close so partially-created resources are freed.
                if (isDisposed.getAndSet(true)) {
                    logger.verbose("Connection was already disposed: Error occurred while connection was starting.",
                        error);
                } else {
                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Error occurred while connection was starting. Error: " + error)).subscribe();
                }
            });

        // Endpoint states are mapped to AmqpEndpointState, stop on shutdown, and trigger a close
        // on error or completion. cache(1) replays the latest state to late subscribers.
        this.endpointStates = this.handler.getEndpointStates()
            .takeUntilOther(shutdownSignalSink.asMono())
            .map(state -> {
                logger.verbose("State {}", state);
                return AmqpEndpointStateUtil.getConnectionState(state);
            })
            .onErrorResume(error -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to error.");
                    return closeAsync(new AmqpShutdownSignal(false, false,
                        error.getMessage())).then(Mono.error(error));
                } else {
                    return Mono.error(error);
                }
            })
            .doOnComplete(() -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to connection close.");

                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Connection handler closed.")).subscribe();
                }
            })
            .cache(1);

        // Keep the state stream hot so lifecycle reactions above fire without external subscribers.
        this.subscriptions = Disposables.composite(this.endpointStates.subscribe());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Flux<AmqpEndpointState> getEndpointStates() {
        return endpointStates;
    }

    /**
     * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed.
     *
     * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is
     *     closed.
     */
    @Override
    public Flux<AmqpShutdownSignal> getShutdownSignals() {
        return shutdownSignalSink.asMono().cache().flux();
    }

    @Override
    public Mono<AmqpManagementNode> getManagementNode(String entityPath) {
        return Mono.defer(() -> {
            if (isDisposed()) {
                return Mono.error(logger.atError()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .log(Exceptions.propagate(new IllegalStateException(
                        "Connection is disposed. Cannot get management instance."))));
            }

            final AmqpManagementNode existing = managementNodes.get(entityPath);
            if (existing != null) {
                return Mono.just(existing);
            }

            // Authorize against the CBS node first; the node itself is created (at most once per
            // entityPath) inside the compute() below.
            final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(),
                connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope())
                .getTokenManager(getClaimsBasedSecurityNode(), entityPath);

            return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> {
                if (current != null) {
                    // Lost the race to another caller; release the redundant token manager.
                    logger.info("A management node exists already, returning it.");

                    tokenManager.close();
                    return current;
                }

                final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
                final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
                final String address = entityPath + "/" + MANAGEMENT_ADDRESS;

                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .addKeyValue("address", address)
                    .log("Creating management node.");

                final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel =
                    createRequestResponseChannel(sessionName, linkName, address);
                return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath,
                    tokenManager);
            }));
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() {
        // The CBS node is only usable once the connection is active.
        return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode()));
    }

    @Override
    public String getId() {
        return connectionId;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getFullyQualifiedNamespace() {
        return handler.getHostname();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getMaxFrameSize() {
        return handler.getMaxFrameSize();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Map<String, Object> getConnectionProperties() {
        return handler.getConnectionProperties();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<AmqpSession> createSession(String sessionName) {
        return connectionMono.map(connection -> {
            // computeIfAbsent makes session creation per-name idempotent and race-free.
            final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> {
                final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId,
                    getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout());
                final Session session = connection.session();

                BaseHandler.setHandler(session, sessionHandler);
                final AmqpSession amqpSession = createSession(key, session, sessionHandler);
                // Watch the session's endpoint states so a failed/completed session removes
                // itself from the map (unless the whole connection is already disposed).
                final Disposable subscription = amqpSession.getEndpointStates()
                    .subscribe(state -> {
                    }, error -> {
                        if (isDisposed.get()) {
                            return;
                        }

                        logger.atInfo()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Error occurred. Removing and disposing session", error);
                        removeSession(key);
                    }, () -> {
                        if (isDisposed.get()) {
                            return;
                        }

                        logger.atVerbose()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Complete. Removing and disposing session.");
                        removeSession(key);
                    });

                return new SessionSubscription(amqpSession, subscription);
            });

            return sessionSubscription;
        }).flatMap(sessionSubscription -> {
            // Only hand the session back once it is ACTIVE; lazy Mono.error avoids building the
            // exception unless the timeout fires.
            final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates()
                .filter(state -> state == AmqpEndpointState.ACTIVE)
                .next()
                .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true,
                    String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.",
                        connectionId, sessionName), handler.getErrorContext())));

            return activeSession.thenReturn(sessionSubscription.getSession());
        });
    }

    /**
     * Creates a new AMQP session with the given parameters.
     *
     * @param sessionName Name of the AMQP session.
     * @param session The reactor session associated with this session.
     * @param handler Session handler for the reactor session.
     *
     * @return A new instance of AMQP session.
     */
    protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
        return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider,
            getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean removeSession(String sessionName) {
        if (sessionName == null) {
            return false;
        }

        final SessionSubscription removed = sessionMap.remove(sessionName);
        if (removed != null) {
            removed.dispose();
        }

        return removed != null;
    }

    @Override
    public boolean isDisposed() {
        return isDisposed.get();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void dispose() {
        // Synchronous close bounded by twice the operation timeout.
        final Duration timeout = operationTimeout.plus(operationTimeout);
        closeAsync().block(timeout);
    }

    /**
     * Gets the active AMQP connection for this instance.
     *
     * @return The AMQP connection.
     *
     * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given
     *     {@link AmqpRetryOptions#getTryTimeout() try timeout}.
     */
    protected Mono<Connection> getReactorConnection() {
        return connectionMono;
    }

    /**
     * Creates a bidirectional link between the message broker and the client.
     *
     * @param sessionName Name of the session.
     * @param linkName Name of the link.
     * @param entityPath Address to the message broker.
     *
     * @return A new {@link RequestResponseChannel} to communicate with the message broker.
     */
    protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName,
        String linkName, String entityPath) {
        Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");

        final Flux<RequestResponseChannel> createChannel = createSession(sessionName)
            .cast(ReactorSession.class)
            .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName,
                entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
                messageSerializer, senderSettleMode, receiverSettleMode))
            .doOnNext(e -> {
                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .log("Emitting new response channel.");
            })
            // Recreate the channel on termination only while the connection is still alive.
            .repeat(() -> !this.isDisposed());

        Map<String, Object> loggingContext = createContextWithConnectionId(connectionId);
        loggingContext.put(ENTITY_PATH_KEY, entityPath);

        return createChannel
            .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(),
                channel -> channel.getEndpointStates(), retryPolicy, loggingContext));
    }

    @Override
    public Mono<Void> closeAsync() {
        // Idempotent: only the first caller starts the close.
        if (isDisposed.getAndSet(true)) {
            logger.verbose("Connection was already closed. Not disposing again.");
            return isClosedMono.asMono();
        }

        return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client."));
    }

    // Tears down the proton-j connection, handler, sessions and executor; completes isClosedMono.
    // Expected to run on the reactor thread via the dispatcher.
    private synchronized void closeConnectionWork() {
        if (connection == null) {
            isClosedMono.emitEmpty((signalType, emitResult) -> {
                addSignalTypeAndResult(logger.atInfo(), signalType, emitResult)
                    .log("Unable to complete closeMono.");
                return false;
            });
            return;
        }

        connection.close();
        handler.close();

        final ArrayList<Mono<Void>> closingSessions = new ArrayList<>();
        sessionMap.values().forEach(link -> closingSessions.add(link.isClosed()));

        // Deferred + synchronized so the close always targets the current executor instance.
        final Mono<Void> closedExecutor = executor != null
            ? Mono.defer(() -> {
                synchronized (this) {
                    logger.info("Closing executor.");
                    return executor.closeAsync();
                }
            })
            : Mono.empty();

        // Wait (bounded) for sessions, then the executor, then emit the terminal closed signal.
        final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions)
            .timeout(operationTimeout)
            .onErrorResume(error -> {
                logger.info("Timed out waiting for all sessions to close.");
                return Mono.empty();
            })
            .then(closedExecutor)
            .then(Mono.fromRunnable(() -> {
                isClosedMono.emitEmpty((signalType, result) -> {
                    addSignalTypeAndResult(logger.atWarning(), signalType, result)
                        .log("Unable to emit connection closed signal.");
                    return false;
                });

                subscriptions.dispose();
            }));

        subscriptions.add(closeSessionAndExecutorMono.subscribe());
    }

    // Lazily creates the CBS channel used to authorize links on this connection.
    private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
        if (cbsChannel == null) {
            logger.info("Setting CBS channel.");
            cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS);
            cbsChannel = new ClaimsBasedSecurityChannel(
                cbsChannelProcessor,
                connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(),
                connectionOptions.getRetry());
        }

        return cbsChannel;
    }

    // Lazily opens the proton-j connection and starts the reactor executor that pumps events.
    private synchronized Connection getOrCreateConnection() throws IOException {
        if (connection == null) {
            logger.atInfo()
                .addKeyValue(HOSTNAME_KEY, handler.getHostname())
                .addKeyValue("port", handler.getProtocolPort())
                .log("Creating and starting connection.");

            final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
            connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);

            final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler();

            // Pending-task wait: the smaller of the server-busy wait and half the try timeout.
            final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2);
            final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0
                ? ClientConstants.SERVER_BUSY_WAIT_TIME
                : timeoutDivided;
            final Scheduler scheduler = Schedulers.newSingle("reactor-executor");
            executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler,
                pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace());

            final Mono<Void> executorCloseMono = Mono.defer(() -> {
                synchronized (this) {
                    return executor.closeAsync();
                }
            });

            // Route dispatcher shutdown (normal or erroneous) into the exception handler, then
            // close the executor either way.
            reactorProvider.getReactorDispatcher().getShutdownSignal()
                .flatMap(signal -> {
                    reactorExceptionHandler.onConnectionShutdown(signal);
                    return executorCloseMono;
                })
                .onErrorResume(error -> {
                    reactorExceptionHandler.onConnectionError(error);
                    return executorCloseMono;
                })
                .subscribe();

            executor.start();
        }

        return connection;
    }

    // Bridges reactor-level failures and shutdowns into this connection's close sequence.
    private final class ReactorExceptionHandler extends AmqpExceptionHandler {
        private ReactorExceptionHandler() {
            super();
        }

        @Override
        public void onConnectionError(Throwable exception) {
            logger.atInfo()
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionError, Starting new reactor", exception);

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onReactorError: Disposing.");

                closeAsync(new AmqpShutdownSignal(false, false,
                    "onReactorError: " + exception.toString()))
                    .subscribe();
            }
        }

        @Override
        void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionShutdown. Shutting down.");

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onConnectionShutdown: disposing.");

                closeAsync(shutdownSignal).subscribe();
            }
        }
    }

    // Pairs an AmqpSession with its endpoint-state subscription so both are disposed together,
    // exactly once.
    private static final class SessionSubscription {
        private final AtomicBoolean isDisposed = new AtomicBoolean();
        private final AmqpSession session;
        private final Disposable subscription;

        private SessionSubscription(AmqpSession session, Disposable subscription) {
            this.session = session;
            this.subscription = subscription;
        }

        private AmqpSession getSession() {
            return session;
        }

        private void dispose() {
            if (isDisposed.getAndSet(true)) {
                return;
            }

            // ReactorSession supports a graceful async close; other implementations are disposed
            // directly.
            if (session instanceof ReactorSession) {
                ((ReactorSession) session).closeAsync("Closing session.", null, true)
                    .subscribe();
            } else {
                session.dispose();
            }

            subscription.dispose();
        }

        private Mono<Void> isClosed() {
            if (session instanceof ReactorSession) {
                return ((ReactorSession) session).isClosed();
            } else {
                return Mono.empty();
            }
        }
    }
}
One case I could think of: with the connection waiting for the RequestResponseChannel to close before notifying all of the ReactorReceiver(s), a potential delay (default timeout: 60 sec) in the channel's close (e.g., because the remote-close never completed) further delays recovery of the ReactorReceiver(s), which means downstream consumers wait longer while recovery happens. Today, the new connection is created immediately and serves downstream consumers while the old connection's closure proceeds in parallel.
/**
 * Closes this connection in response to the given shutdown signal: closes the CBS channel and
 * management nodes, publishes the shutdown signal, then tears down the proton-j connection on
 * the reactor thread. Returns a Mono that completes once closeConnectionWork() has finished.
 *
 * @param shutdownSignal The signal describing why the connection is closing.
 * @return A Mono completing when the connection is fully closed.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Deferred so the signal is published on subscription -- after the CBS and management nodes
    // below have closed -- rather than as a side effect of merely calling this method.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // Sinks.one() accepts a single value; a concurrent close may already have emitted.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    // Close the CBS channel only if it was ever created.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node handed out by getManagementNode(...).
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // The proton-j connection must be torn down on the reactor thread; fall back to closing
    // inline when the dispatcher is unavailable or rejects the work.
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // Order: CBS + management nodes (in parallel, delaying errors) -> emit shutdown signal ->
    // reactor teardown -> wait for closeConnectionWork() to complete isClosedMono.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType ->
            logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType ->
            logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType ->
            logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Emitted connection shutdown signal.")))
        .then(closeReactor.doFinally(signalType ->
            logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
logger.atVerbose()
/**
 * Closes this connection in response to the given shutdown signal: closes the CBS channel and
 * management nodes, publishes the shutdown signal, then tears down the proton-j connection on
 * the reactor thread. Returns a Mono that completes once closeConnectionWork() has finished.
 *
 * @param shutdownSignal The signal describing why the connection is closing.
 * @return A Mono completing when the connection is fully closed.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // BUG FIX: the shutdown signal used to be emitted eagerly, as a side effect of *calling*
    // this method (assembly time) rather than of subscribing to the returned Mono. Wrapping the
    // emission in Mono.fromRunnable defers it to subscription time and lets it be sequenced
    // after the CBS/management node closures below.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // Sinks.one() accepts a single value; a concurrent close may already have emitted.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    // Close the CBS channel only if it was ever created.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node handed out by getManagementNode(...).
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // The proton-j connection must be torn down on the reactor thread; fall back to closing
    // inline when the dispatcher is unavailable or rejects the work.
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // Order: CBS + management nodes (in parallel, delaying errors) -> emit shutdown signal ->
    // reactor teardown -> wait for closeConnectionWork() to complete isClosedMono.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType ->
            logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType ->
            logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType ->
            logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Emitted connection shutdown signal.")))
        .then(closeReactor.doFinally(signalType ->
            logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat() .takeUntilOther(shutdownSignalSink.asMono()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
/**
 * An {@link AmqpConnection} implementation backed by a proton-j {@link Reactor}. Manages the connection
 * lifecycle, its sessions, the claims-based security (CBS) channel used for authorization, and per-entity
 * management nodes.
 */
class ReactorConnection implements AmqpConnection {
    private static final String CBS_SESSION_NAME = "cbs-session";
    private static final String CBS_ADDRESS = "$cbs";
    private static final String CBS_LINK_NAME = "cbs";

    private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
    private static final String MANAGEMENT_ADDRESS = "$management";
    private static final String MANAGEMENT_LINK_NAME = "mgmt";

    private final ClientLogger logger;
    // Sessions and management nodes keyed by name/entity path; concurrent maps because callers race with shutdown.
    private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>();
    private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>();
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one();
    private final Flux<AmqpEndpointState> endpointStates;
    // Completes when closeConnectionWork has finished tearing everything down.
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();

    private final String connectionId;
    private final Mono<Connection> connectionMono;
    private final ConnectionHandler handler;
    private final ReactorHandlerProvider handlerProvider;
    private final TokenManagerProvider tokenManagerProvider;
    private final MessageSerializer messageSerializer;
    private final ConnectionOptions connectionOptions;
    private final ReactorProvider reactorProvider;
    private final AmqpRetryPolicy retryPolicy;
    private final SenderSettleMode senderSettleMode;
    private final ReceiverSettleMode receiverSettleMode;
    private final Duration operationTimeout;
    private final Composite subscriptions;

    private ReactorExecutor executor;

    private volatile ClaimsBasedSecurityChannel cbsChannel;
    private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor;
    private volatile Connection connection;

    /**
     * Creates a new AMQP connection that uses proton-j.
     *
     * @param connectionId Identifier for the connection.
     * @param connectionOptions A set of options used to create the AMQP connection.
     * @param reactorProvider Provides proton-j Reactor instances.
     * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
     * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node.
     * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}.
     * @param senderSettleMode to set as {@link SenderSettleMode} on sender.
     * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
     */
    public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider,
        ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider,
        MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
        ReceiverSettleMode receiverSettleMode) {

        this.connectionOptions = connectionOptions;
        this.reactorProvider = reactorProvider;
        this.connectionId = connectionId;
        this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId));
        this.handlerProvider = handlerProvider;
        this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider,
            "'tokenManagerProvider' cannot be null.");
        this.messageSerializer = messageSerializer;
        this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions);

        this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry());
        this.operationTimeout = connectionOptions.getRetry().getTryTimeout();
        this.senderSettleMode = senderSettleMode;
        this.receiverSettleMode = receiverSettleMode;

        // Lazily creates the underlying connection, then waits (bounded by tryTimeout) for it to
        // transition to ACTIVE before handing it to callers.
        this.connectionMono = Mono.fromCallable(this::getOrCreateConnection)
            .flatMap(reactorConnection -> {
                final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates()
                    .filter(state -> state == AmqpEndpointState.ACTIVE)
                    .next()
                    .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format(
                        "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId,
                        operationTimeout), handler.getErrorContext())));
                return activeEndpoint.thenReturn(reactorConnection);
            })
            .doOnError(error -> {
                if (isDisposed.getAndSet(true)) {
                    logger.verbose("Connection was already disposed: Error occurred while connection was starting.",
                        error);
                } else {
                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Error occurred while connection was starting. Error: " + error)).subscribe();
                }
            });

        // Mirror the handler's endpoint states and close this connection when they error/complete.
        this.endpointStates = this.handler.getEndpointStates()
            .takeUntilOther(shutdownSignalSink.asMono())
            .map(state -> {
                logger.verbose("State {}", state);
                return AmqpEndpointStateUtil.getConnectionState(state);
            })
            .onErrorResume(error -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to error.");
                    return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error));
                } else {
                    return Mono.error(error);
                }
            })
            .doOnComplete(() -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to connection close.");
                    closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe();
                }
            })
            .cache(1);

        this.subscriptions = Disposables.composite(this.endpointStates.subscribe());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Flux<AmqpEndpointState> getEndpointStates() {
        return endpointStates;
    }

    /**
     * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed.
     *
     * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is
     *     closed.
     */
    @Override
    public Flux<AmqpShutdownSignal> getShutdownSignals() {
        return shutdownSignalSink.asMono().cache().flux();
    }

    @Override
    public Mono<AmqpManagementNode> getManagementNode(String entityPath) {
        return Mono.defer(() -> {
            if (isDisposed()) {
                return Mono.error(logger.atError()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .log(Exceptions.propagate(new IllegalStateException(
                        "Connection is disposed. Cannot get management instance."))));
            }

            final AmqpManagementNode existing = managementNodes.get(entityPath);
            if (existing != null) {
                return Mono.just(existing);
            }

            final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(),
                connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope())
                .getTokenManager(getClaimsBasedSecurityNode(), entityPath);

            return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> {
                if (current != null) {
                    // Another caller won the race; release the token manager we created.
                    logger.info("A management node exists already, returning it.");
                    tokenManager.close();
                    return current;
                }

                final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
                final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
                final String address = entityPath + "/" + MANAGEMENT_ADDRESS;

                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .addKeyValue("address", address)
                    .log("Creating management node.");

                final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel =
                    createRequestResponseChannel(sessionName, linkName, address);
                return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath,
                    tokenManager);
            }));
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() {
        return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode()));
    }

    @Override
    public String getId() {
        return connectionId;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getFullyQualifiedNamespace() {
        return handler.getHostname();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getMaxFrameSize() {
        return handler.getMaxFrameSize();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Map<String, Object> getConnectionProperties() {
        return handler.getConnectionProperties();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<AmqpSession> createSession(String sessionName) {
        return connectionMono.map(connection -> {
            final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> {
                final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId,
                    getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout());
                final Session session = connection.session();

                BaseHandler.setHandler(session, sessionHandler);
                final AmqpSession amqpSession = createSession(key, session, sessionHandler);
                // Remove the session from the map when its endpoint stream errors or completes, unless the
                // whole connection is already being disposed.
                final Disposable subscription = amqpSession.getEndpointStates()
                    .subscribe(state -> {
                    }, error -> {
                        if (isDisposed.get()) {
                            return;
                        }
                        logger.atInfo()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Error occurred. Removing and disposing session", error);
                        removeSession(key);
                    }, () -> {
                        if (isDisposed.get()) {
                            return;
                        }
                        logger.atVerbose()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Complete. Removing and disposing session.");
                        removeSession(key);
                    });

                return new SessionSubscription(amqpSession, subscription);
            });

            return sessionSubscription;
        }).flatMap(sessionSubscription -> {
            final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates()
                .filter(state -> state == AmqpEndpointState.ACTIVE)
                .next()
                .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true,
                    String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.",
                        connectionId, sessionName), handler.getErrorContext())));

            return activeSession.thenReturn(sessionSubscription.getSession());
        });
    }

    /**
     * Creates a new AMQP session with the given parameters.
     *
     * @param sessionName Name of the AMQP session.
     * @param session The reactor session associated with this session.
     * @param handler Session handler for the reactor session.
     *
     * @return A new instance of AMQP session.
     */
    protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
        return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider,
            getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean removeSession(String sessionName) {
        if (sessionName == null) {
            return false;
        }

        final SessionSubscription removed = sessionMap.remove(sessionName);
        if (removed != null) {
            removed.dispose();
        }

        return removed != null;
    }

    @Override
    public boolean isDisposed() {
        return isDisposed.get();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void dispose() {
        // Allow twice the operation timeout so pending close work scheduled on the reactor can finish.
        final Duration timeout = operationTimeout.plus(operationTimeout);
        closeAsync().block(timeout);
    }

    /**
     * Gets the active AMQP connection for this instance.
     *
     * @return The AMQP connection.
     *
     * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given
     *     {@link AmqpRetryOptions}.
     */
    protected Mono<Connection> getReactorConnection() {
        return connectionMono;
    }

    /**
     * Creates a bidirectional link between the message broker and the client.
     *
     * @param sessionName Name of the session.
     * @param linkName Name of the link.
     * @param entityPath Address to the message broker.
     *
     * @return A new {@link RequestResponseChannel} to communicate with the message broker.
     */
    protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName,
        String linkName, String entityPath) {
        Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        final Flux<RequestResponseChannel> createChannel = createSession(sessionName)
            .cast(ReactorSession.class)
            .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName,
                entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
                messageSerializer, senderSettleMode, receiverSettleMode))
            .doOnNext(e -> {
                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .log("Emitting new response channel.");
            })
            // Resubscribe for a new channel only while this connection has not been disposed.
            .repeat(() -> !this.isDisposed());

        Map<String, Object> loggingContext = createContextWithConnectionId(connectionId);
        loggingContext.put(ENTITY_PATH_KEY, entityPath);

        return createChannel
            .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(),
                channel -> channel.getEndpointStates(), retryPolicy, loggingContext));
    }

    @Override
    public Mono<Void> closeAsync() {
        if (isDisposed.getAndSet(true)) {
            logger.verbose("Connection was already closed. Not disposing again.");
            return isClosedMono.asMono();
        }

        return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client."));
    }

    // Tears down the proton-j connection, sessions and executor; scheduled via the reactor
    // dispatcher or run inline as a fallback.
    private synchronized void closeConnectionWork() {
        if (connection == null) {
            isClosedMono.emitEmpty((signalType, emitResult) -> {
                addSignalTypeAndResult(logger.atInfo(), signalType, emitResult)
                    .log("Unable to complete closeMono.");
                return false;
            });
            return;
        }

        connection.close();
        handler.close();

        final ArrayList<Mono<Void>> closingSessions = new ArrayList<>();
        sessionMap.values().forEach(link -> closingSessions.add(link.isClosed()));

        final Mono<Void> closedExecutor = executor != null ? Mono.defer(() -> {
            synchronized (this) {
                logger.info("Closing executor.");
                return executor.closeAsync();
            }
        }) : Mono.empty();

        // Wait (bounded) for sessions to close, then the executor, then signal completion.
        final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions)
            .timeout(operationTimeout)
            .onErrorResume(error -> {
                logger.info("Timed out waiting for all sessions to close.");
                return Mono.empty();
            })
            .then(closedExecutor)
            .then(Mono.fromRunnable(() -> {
                isClosedMono.emitEmpty((signalType, result) -> {
                    addSignalTypeAndResult(logger.atWarning(), signalType, result)
                        .log("Unable to emit connection closed signal.");
                    return false;
                });

                subscriptions.dispose();
            }));

        subscriptions.add(closeSessionAndExecutorMono.subscribe());
    }

    // Lazily creates the CBS channel used to authorize with the $cbs node.
    private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
        if (cbsChannel == null) {
            logger.info("Setting CBS channel.");
            cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS);
            cbsChannel = new ClaimsBasedSecurityChannel(cbsChannelProcessor, connectionOptions.getTokenCredential(),
                connectionOptions.getAuthorizationType(), connectionOptions.getRetry());
        }

        return cbsChannel;
    }

    // Lazily creates the proton-j connection, reactor executor and shutdown wiring.
    private synchronized Connection getOrCreateConnection() throws IOException {
        if (connection == null) {
            logger.atInfo()
                .addKeyValue(HOSTNAME_KEY, handler.getHostname())
                .addKeyValue("port", handler.getProtocolPort())
                .log("Creating and starting connection.");

            final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
            connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);

            final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler();

            // Use the smaller of SERVER_BUSY_WAIT_TIME and half the try timeout for pending tasks.
            final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2);
            final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0
                ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided;
            final Scheduler scheduler = Schedulers.newSingle("reactor-executor");
            executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler,
                pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace());

            final Mono<Void> executorCloseMono = Mono.defer(() -> {
                synchronized (this) {
                    return executor.closeAsync();
                }
            });

            reactorProvider.getReactorDispatcher().getShutdownSignal()
                .flatMap(signal -> {
                    reactorExceptionHandler.onConnectionShutdown(signal);
                    return executorCloseMono;
                })
                .onErrorResume(error -> {
                    reactorExceptionHandler.onConnectionError(error);
                    return executorCloseMono;
                })
                .subscribe();

            executor.start();
        }

        return connection;
    }

    // Routes reactor-level errors and shutdown signals into this connection's close path.
    private final class ReactorExceptionHandler extends AmqpExceptionHandler {
        private ReactorExceptionHandler() {
            super();
        }

        @Override
        public void onConnectionError(Throwable exception) {
            logger.atInfo()
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionError, Starting new reactor", exception);

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onReactorError: Disposing.");

                closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString()))
                    .subscribe();
            }
        }

        @Override
        void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionShutdown. Shutting down.");

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onConnectionShutdown: disposing.");

                closeAsync(shutdownSignal).subscribe();
            }
        }
    }

    // Pairs an AmqpSession with its endpoint-state subscription so both can be disposed together.
    private static final class SessionSubscription {
        private final AtomicBoolean isDisposed = new AtomicBoolean();
        private final AmqpSession session;
        private final Disposable subscription;

        private SessionSubscription(AmqpSession session, Disposable subscription) {
            this.session = session;
            this.subscription = subscription;
        }

        private AmqpSession getSession() {
            return session;
        }

        private void dispose() {
            if (isDisposed.getAndSet(true)) {
                return;
            }

            if (session instanceof ReactorSession) {
                ((ReactorSession) session).closeAsync("Closing session.", null, true)
                    .subscribe();
            } else {
                session.dispose();
            }

            subscription.dispose();
        }

        private Mono<Void> isClosed() {
            if (session instanceof ReactorSession) {
                return ((ReactorSession) session).isClosed();
            } else {
                return Mono.empty();
            }
        }
    }
}
1. Using `cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync())` returns the `Mono<Void> closeOperationWithTimeout`, so `cbsCloseOperation` blocks until a Detach frame is received from the remote link. ![cbs-closing-frame-logic1](https://user-images.githubusercontent.com/97088315/162147201-7ffe61cf-da73-470a-b95c-99d4917e4bf7.png) 2. Using `cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose())` only invokes `link.close()` on the `reactor-executor` thread, so it merely triggers sending the Detach frame without waiting for the remote Detach to arrive. ![cbs-closing-frame-logic2](https://user-images.githubusercontent.com/97088315/162147229-e5c88819-1fd7-4446-9b97-b93513559354.png)
/**
 * Disposes of the connection: closes the CBS channel and management nodes, emits the given
 * shutdown signal, then tears down the underlying reactor connection.
 *
 * @param shutdownSignal Signal describing why the connection is being shut down; emitted to
 *     {@code shutdownSignalSink} as part of the returned pipeline.
 *
 * @return A mono that completes when the connection has finished closing (i.e. when
 *     {@code isClosedMono} completes).
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Deferred: the logging and the sink emission run only when the returned Mono is subscribed
    // to, not eagerly when closeAsync(...) is invoked.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // The sink may have already received a value/terminal signal; log and continue closing.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        // dispose() only invokes link.close() on the reactor-executor thread, which triggers
        // sending the Detach frame without waiting for the remote Detach to be received.
        cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node that was created for this connection.
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                // Prefer running the close work on the reactor thread via the dispatcher.
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                // Dispatcher no longer accepts work (e.g. already shutting down); close in-line.
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // Sequence: (CBS close + management close, errors delayed so both run) -> emit shutdown
    // signal -> close reactor -> wait for the connection-closed completion signal.
    return Mono.whenDelayError(
            cbsCloseOperation.doFinally(signalType ->
                logger.atVerbose()
                    .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                    .log("Closed CBS node.")),
            managementNodeCloseOperations.doFinally(signalType ->
                logger.atVerbose()
                    .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType ->
            logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Emitted connection shutdown signal. ")))
        .then(closeReactor.doFinally(signalType ->
            logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose());
/**
 * Disposes of the connection: closes the CBS channel and management nodes, emits the given
 * shutdown signal, then tears down the underlying reactor connection.
 *
 * <p>Fixes over the previous implementation:</p>
 * <ul>
 * <li>The shutdown-signal logging and {@code tryEmitValue} previously ran eagerly when
 *     {@code closeAsync} was invoked; they are now deferred into the returned pipeline via
 *     {@link Mono#fromRunnable(Runnable)} so assembling the Mono has no side effects.</li>
 * <li>CBS close previously used {@code cbsChannelProcessor.flatMap(channel -> channel.closeAsync())},
 *     which waits for the remote Detach frame and can stall shutdown; it now calls
 *     {@code cbsChannelProcessor.dispose()}, which only triggers sending the Detach frame.</li>
 * </ul>
 *
 * @param shutdownSignal Signal describing why the connection is being shut down; emitted to
 *     {@code shutdownSignalSink} as part of the returned pipeline.
 *
 * @return A mono that completes when the connection has finished closing (i.e. when
 *     {@code isClosedMono} completes).
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Deferred: the logging and the sink emission run only when the returned Mono is subscribed
    // to, not eagerly when closeAsync(...) is invoked.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // The sink may have already received a value/terminal signal; log and continue closing.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        // dispose() only invokes link.close() on the reactor-executor thread, which triggers
        // sending the Detach frame without blocking until the remote Detach is received.
        cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node that was created for this connection.
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                // Prefer running the close work on the reactor thread via the dispatcher.
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                // Dispatcher no longer accepts work (e.g. already shutting down); close in-line.
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // Sequence: (CBS close + management close, errors delayed so both run) -> emit shutdown
    // signal -> close reactor -> wait for the connection-closed completion signal.
    return Mono.whenDelayError(
            cbsCloseOperation.doFinally(signalType ->
                logger.atVerbose()
                    .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                    .log("Closed CBS node.")),
            managementNodeCloseOperations.doFinally(signalType ->
                logger.atVerbose()
                    .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                    .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType ->
            logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Emitted connection shutdown signal.")))
        .then(closeReactor.doFinally(signalType ->
            logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); }) 
.doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(() -> !this.isDisposed()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
If we do not emit the shutdown signal after closing the RequestResponseChannel, the CBS channel may not send the link DETACH frame, and the frame sequence may look like: ![cbs-closing-frame-logic3](https://user-images.githubusercontent.com/97088315/162151409-67de04ea-dd7e-48cc-80d3-4a31182df10b.png)
/**
 * Disposes of the connection: closes the CBS channel and management nodes first, then emits the
 * shutdown signal, and finally schedules the reactor/connection teardown.
 *
 * <p>Ordering matters here: the shutdown signal must be emitted only AFTER the CBS
 * request-response channel has been closed; otherwise the CBS channel may not get the chance to
 * send its link DETACH frame before the connection tears down.</p>
 *
 * @param shutdownSignal Signal describing why the connection is being closed; surfaced to
 *     subscribers of the shutdown-signal sink.
 * @return A mono that completes when the connection close work has finished
 *     (i.e. {@code isClosedMono} completes).
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Deferred (fromRunnable) so the signal is emitted at subscription time, in sequence,
    // rather than eagerly when closeAsync(...) is invoked.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // Best-effort emission; a failure (e.g. signal already emitted) is logged, not thrown.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    // CBS channel may never have been created (lazily initialized); close it only if present.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node that was created for this connection.
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // Schedule the proton-j connection teardown on the reactor thread; if the dispatcher is
    // unavailable or rejects the work, fall back to closing on the current thread.
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError: close CBS and management nodes in parallel, collecting any errors,
    // THEN emit the shutdown signal, THEN tear down the reactor.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal. ")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
.then(emitShutDownSignalOperation.doFinally(signalType ->
/**
 * Disposes of the connection: closes the CBS channel and management nodes first, then emits the
 * shutdown signal, and finally schedules the reactor/connection teardown.
 *
 * <p>Fix: the shutdown signal used to be emitted synchronously, at the time this method was
 * invoked — i.e. before the CBS request-response channel was closed. That could prevent the CBS
 * channel from sending its link DETACH frame before the connection tore down. The emission is now
 * wrapped in {@link Mono#fromRunnable} and sequenced strictly after the CBS and management-node
 * close operations complete.</p>
 *
 * @param shutdownSignal Signal describing why the connection is being closed; surfaced to
 *     subscribers of the shutdown-signal sink.
 * @return A mono that completes when the connection close work has finished
 *     (i.e. {@code isClosedMono} completes).
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Deferred so the signal is emitted at subscription time, after the CBS channel is closed,
    // rather than eagerly when closeAsync(...) is invoked.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // Best-effort emission; a failure (e.g. signal already emitted) is logged, not thrown.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    // CBS channel may never have been created (lazily initialized); close it only if present.
    // dispose() tears the channel processor down synchronously inside the runnable so the
    // DETACH exchange happens before the shutdown signal is emitted.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node that was created for this connection.
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // Schedule the proton-j connection teardown on the reactor thread; if the dispatcher is
    // unavailable or rejects the work, fall back to closing on the current thread.
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError: close CBS and management nodes in parallel, collecting any errors,
    // THEN emit the shutdown signal, THEN tear down the reactor.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); }) 
.doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
/**
 * An AMQP connection backed by a proton-j {@link Connection}. Owns the reactor executor that pumps
 * proton-j events, the sessions created on the connection, the claims-based-security (CBS) channel
 * used for authorization, and per-entity management nodes. All teardown paths funnel through
 * {@code closeAsync(AmqpShutdownSignal)} guarded by {@code isDisposed} so disposal happens once.
 */
class ReactorConnection implements AmqpConnection {
    // Fixed names/addresses for the CBS (authorization) session and link.
    private static final String CBS_SESSION_NAME = "cbs-session";
    private static final String CBS_ADDRESS = "$cbs";
    private static final String CBS_LINK_NAME = "cbs";

    // Suffixes/addresses used to build per-entity management sessions and links.
    private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
    private static final String MANAGEMENT_ADDRESS = "$management";
    private static final String MANAGEMENT_LINK_NAME = "mgmt";

    private final ClientLogger logger;
    // Sessions by name; entries are removed (and disposed) when their endpoint state errors/completes.
    private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>();
    // Management nodes by entity path, created lazily in getManagementNode.
    private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>();
    // One-shot guard: every dispose/close path does getAndSet(true) so cleanup runs exactly once.
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Emits the single shutdown signal; also terminates the endpointStates flux via takeUntilOther.
    private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one();
    private final Flux<AmqpEndpointState> endpointStates;
    // Completes when closeConnectionWork has finished tearing everything down.
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();

    private final String connectionId;
    private final Mono<Connection> connectionMono;
    private final ConnectionHandler handler;
    private final ReactorHandlerProvider handlerProvider;
    private final TokenManagerProvider tokenManagerProvider;
    private final MessageSerializer messageSerializer;
    private final ConnectionOptions connectionOptions;
    private final ReactorProvider reactorProvider;
    private final AmqpRetryPolicy retryPolicy;
    private final SenderSettleMode senderSettleMode;
    private final ReceiverSettleMode receiverSettleMode;
    private final Duration operationTimeout;
    private final Composite subscriptions;

    // Created lazily in getOrCreateConnection; guarded by synchronized methods.
    private ReactorExecutor executor;

    // Lazily initialized in getOrCreateCBSNode; volatile because closeAsync reads them unsynchronized.
    private volatile ClaimsBasedSecurityChannel cbsChannel;
    private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor;
    private volatile Connection connection;

    /**
     * Creates a new AMQP connection that uses proton-j.
     *
     * @param connectionId Identifier for the connection.
     * @param connectionOptions A set of options used to create the AMQP connection.
     * @param reactorProvider Provides proton-j Reactor instances.
     * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
     * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node.
     * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}.
     * @param senderSettleMode to set as {@link SenderSettleMode} on sender.
     * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
     */
    public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider,
        ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider,
        MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
        ReceiverSettleMode receiverSettleMode) {
        this.connectionOptions = connectionOptions;
        this.reactorProvider = reactorProvider;
        this.connectionId = connectionId;
        this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId));
        this.handlerProvider = handlerProvider;
        this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider,
            "'tokenManagerProvider' cannot be null.");
        this.messageSerializer = messageSerializer;
        this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions);

        this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry());
        this.operationTimeout = connectionOptions.getRetry().getTryTimeout();
        this.senderSettleMode = senderSettleMode;
        this.receiverSettleMode = receiverSettleMode;

        // Lazily creates the underlying connection on first subscription and only emits it once the
        // endpoint reports ACTIVE (or times out). The error supplier is lazy so the exception is
        // built only when the timeout actually fires.
        this.connectionMono = Mono.fromCallable(this::getOrCreateConnection)
            .flatMap(reactorConnection -> {
                final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates()
                    .filter(state -> state == AmqpEndpointState.ACTIVE)
                    .next()
                    .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format(
                        "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId,
                        operationTimeout), handler.getErrorContext())));
                return activeEndpoint.thenReturn(reactorConnection);
            })
            .doOnError(error -> {
                // Start-up failure: dispose the connection unless another path already did.
                if (isDisposed.getAndSet(true)) {
                    logger.verbose("Connection was already disposed: Error occurred while connection was starting.",
                        error);
                } else {
                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Error occurred while connection was starting. Error: " + error)).subscribe();
                }
            });

        // Mirrors the handler's endpoint states, stops when a shutdown signal is emitted, and
        // triggers disposal on error/completion. cache(1) replays the latest state to late subscribers.
        this.endpointStates = this.handler.getEndpointStates()
            .takeUntilOther(shutdownSignalSink.asMono())
            .map(state -> {
                logger.verbose("State {}", state);
                return AmqpEndpointStateUtil.getConnectionState(state);
            })
            .onErrorResume(error -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to error.");
                    return closeAsync(new AmqpShutdownSignal(false, false,
                        error.getMessage())).then(Mono.error(error));
                } else {
                    return Mono.error(error);
                }
            })
            .doOnComplete(() -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to connection close.");
                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Connection handler closed.")).subscribe();
                }
            })
            .cache(1);

        // Keep the endpoint-state pipeline hot for the lifetime of this connection.
        this.subscriptions = Disposables.composite(this.endpointStates.subscribe());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Flux<AmqpEndpointState> getEndpointStates() {
        return endpointStates;
    }

    /**
     * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed.
     *
     * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is
     *     closed.
     */
    @Override
    public Flux<AmqpShutdownSignal> getShutdownSignals() {
        return shutdownSignalSink.asMono().cache().flux();
    }

    /**
     * Gets (or lazily creates) the management node for the given entity, authorizing via CBS first.
     *
     * @param entityPath Entity for which to get the management node.
     * @return A mono that emits the management node, or errors if this connection is disposed.
     */
    @Override
    public Mono<AmqpManagementNode> getManagementNode(String entityPath) {
        return Mono.defer(() -> {
            if (isDisposed()) {
                return Mono.error(logger.atError()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .log(Exceptions.propagate(new IllegalStateException(
                        "Connection is disposed. Cannot get management instance."))));
            }

            final AmqpManagementNode existing = managementNodes.get(entityPath);
            if (existing != null) {
                return Mono.just(existing);
            }

            final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(),
                connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope())
                .getTokenManager(getClaimsBasedSecurityNode(), entityPath);

            // Authorize before exposing the node; compute() makes creation atomic per entity path.
            return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> {
                if (current != null) {
                    logger.info("A management node exists already, returning it.");

                    // Close the token manager created above because we are not going to use it.
                    tokenManager.close();
                    return current;
                }

                final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
                final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
                final String address = entityPath + "/" + MANAGEMENT_ADDRESS;

                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .addKeyValue("address", address)
                    .log("Creating management node.");

                final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel =
                    createRequestResponseChannel(sessionName, linkName, address);
                return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath,
                    tokenManager);
            }));
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() {
        // Waits for the connection to be active before creating/returning the CBS node.
        return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode()));
    }

    @Override
    public String getId() {
        return connectionId;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getFullyQualifiedNamespace() {
        return handler.getHostname();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getMaxFrameSize() {
        return handler.getMaxFrameSize();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Map<String, Object> getConnectionProperties() {
        return handler.getConnectionProperties();
    }

    /**
     * Creates or returns the session with the given name once it reports ACTIVE, timing out with a
     * retriable {@link AmqpException} otherwise.
     *
     * @param sessionName Name of the session to create or fetch.
     * @return A mono that completes with the active session.
     */
    @Override
    public Mono<AmqpSession> createSession(String sessionName) {
        return connectionMono.map(connection -> {
            final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> {
                final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId,
                    getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout());
                final Session session = connection.session();

                BaseHandler.setHandler(session, sessionHandler);
                final AmqpSession amqpSession = createSession(key, session, sessionHandler);

                // Self-cleaning: remove the session from the map when its endpoint errors or completes,
                // unless the whole connection is already being disposed.
                final Disposable subscription = amqpSession.getEndpointStates()
                    .subscribe(state -> {
                    }, error -> {
                        if (isDisposed.get()) {
                            return;
                        }
                        logger.atInfo()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Error occurred. Removing and disposing session", error);
                        removeSession(key);
                    }, () -> {
                        if (isDisposed.get()) {
                            return;
                        }
                        logger.atVerbose()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Complete. Removing and disposing session.");
                        removeSession(key);
                    });

                return new SessionSubscription(amqpSession, subscription);
            });

            return sessionSubscription;
        }).flatMap(sessionSubscription -> {
            final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates()
                .filter(state -> state == AmqpEndpointState.ACTIVE)
                .next()
                // Lazy error supplier: the exception is only constructed if the timeout fires.
                .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true,
                    String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.",
                        connectionId, sessionName), handler.getErrorContext())));

            return activeSession.thenReturn(sessionSubscription.getSession());
        });
    }

    /**
     * Creates a new AMQP session with the given parameters.
     *
     * @param sessionName Name of the AMQP session.
     * @param session The reactor session associated with this session.
     * @param handler Session handler for the reactor session.
     *
     * @return A new instance of AMQP session.
     */
    protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
        return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider,
            getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean removeSession(String sessionName) {
        if (sessionName == null) {
            return false;
        }

        final SessionSubscription removed = sessionMap.remove(sessionName);

        if (removed != null) {
            removed.dispose();
        }

        return removed != null;
    }

    @Override
    public boolean isDisposed() {
        return isDisposed.get();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void dispose() {
        // Block for at most 2x the operation timeout to let the async close finish.
        final Duration timeout = operationTimeout.plus(operationTimeout);
        closeAsync().block(timeout);
    }

    /**
     * Gets the active AMQP connection for this instance.
     *
     * @return The AMQP connection.
     *
     * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given
     *     {@link AmqpRetryOptions#getTryTimeout() timeout}.
     */
    protected Mono<Connection> getReactorConnection() {
        return connectionMono;
    }

    /**
     * Creates a bidirectional link between the message broker and the client.
     *
     * @param sessionName Name of the session.
     * @param linkName Name of the link.
     * @param entityPath Address to the message broker.
     *
     * @return A new {@link RequestResponseChannel} to communicate with the message broker.
     */
    protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName,
        String linkName, String entityPath) {
        Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");

        // repeat() recreates the channel when the previous one terminates, but only while this
        // connection is still alive — otherwise a close could trigger a pointless new channel.
        final Flux<RequestResponseChannel> createChannel = createSession(sessionName)
            .cast(ReactorSession.class)
            .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName,
                entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
                messageSerializer, senderSettleMode, receiverSettleMode))
            .doOnNext(e -> {
                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .log("Emitting new response channel.");
            })
            .repeat(() -> !this.isDisposed());

        Map<String, Object> loggingContext = createContextWithConnectionId(connectionId);
        loggingContext.put(ENTITY_PATH_KEY, entityPath);

        return createChannel
            .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(),
                channel -> channel.getEndpointStates(), retryPolicy, loggingContext));
    }

    @Override
    public Mono<Void> closeAsync() {
        if (isDisposed.getAndSet(true)) {
            logger.verbose("Connection was already closed. Not disposing again.");
            return isClosedMono.asMono();
        }

        return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client."));
    }

    // Tears down the proton-j connection, waits (bounded) for sessions to close, closes the
    // executor, then completes isClosedMono. Must run on the reactor dispatcher thread when
    // possible; synchronized because it touches `connection` and `executor`.
    private synchronized void closeConnectionWork() {
        if (connection == null) {
            isClosedMono.emitEmpty((signalType, emitResult) -> {
                addSignalTypeAndResult(logger.atInfo(), signalType, emitResult)
                    .log("Unable to complete closeMono.");

                return false;
            });

            return;
        }

        connection.close();
        handler.close();

        final ArrayList<Mono<Void>> closingSessions = new ArrayList<>();
        sessionMap.values().forEach(link -> closingSessions.add(link.isClosed()));

        // Executor is closed lazily and under the lock, because another thread may be mutating it.
        final Mono<Void> closedExecutor = executor != null ? Mono.defer(() -> {
            synchronized (this) {
                logger.info("Closing executor.");
                return executor.closeAsync();
            }
        }) : Mono.empty();

        // Wait for sessions (bounded by operationTimeout, best-effort), then the executor,
        // then signal completion and release our subscriptions.
        final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions)
            .timeout(operationTimeout)
            .onErrorResume(error -> {
                logger.info("Timed out waiting for all sessions to close.");
                return Mono.empty();
            })
            .then(closedExecutor)
            .then(Mono.fromRunnable(() -> {
                isClosedMono.emitEmpty((signalType, result) -> {
                    addSignalTypeAndResult(logger.atWarning(), signalType, result)
                        .log("Unable to emit connection closed signal.");
                    return false;
                });

                subscriptions.dispose();
            }));

        subscriptions.add(closeSessionAndExecutorMono.subscribe());
    }

    // Lazily creates the CBS channel/node. Synchronized so the channel processor is built once.
    private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
        if (cbsChannel == null) {
            logger.info("Setting CBS channel.");
            cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS);
            cbsChannel = new ClaimsBasedSecurityChannel(
                cbsChannelProcessor,
                connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(),
                connectionOptions.getRetry());
        }

        return cbsChannel;
    }

    // Lazily creates the proton-j connection plus the executor pumping its reactor, and wires the
    // dispatcher's shutdown signal into this connection's teardown.
    private synchronized Connection getOrCreateConnection() throws IOException {
        if (connection == null) {
            logger.atInfo()
                .addKeyValue(HOSTNAME_KEY, handler.getHostname())
                .addKeyValue("port", handler.getProtocolPort())
                .log("Creating and starting connection.");

            final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
            connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);

            final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler();

            // Wait time between reactor pumps when the broker reports "server busy": half the try
            // timeout, capped at SERVER_BUSY_WAIT_TIME.
            final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2);
            final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0
                ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided;
            final Scheduler scheduler = Schedulers.newSingle("reactor-executor");
            executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler,
                pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace());

            // Deferred + synchronized so the executor close observes the latest `executor` value.
            final Mono<Void> executorCloseMono = Mono.defer(() -> {
                synchronized (this) {
                    return executor.closeAsync();
                }
            });

            // When the dispatcher signals shutdown (or errors), notify the handler and close the executor.
            reactorProvider.getReactorDispatcher().getShutdownSignal()
                .flatMap(signal -> {
                    reactorExceptionHandler.onConnectionShutdown(signal);
                    return executorCloseMono;
                })
                .onErrorResume(error -> {
                    reactorExceptionHandler.onConnectionError(error);
                    return executorCloseMono;
                })
                .subscribe();

            executor.start();
        }

        return connection;
    }

    /**
     * Routes reactor-level errors and shutdown signals into this connection's disposal path.
     */
    private final class ReactorExceptionHandler extends AmqpExceptionHandler {
        private ReactorExceptionHandler() {
            super();
        }

        @Override
        public void onConnectionError(Throwable exception) {
            logger.atInfo()
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionError, Starting new reactor", exception);

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onReactorError: Disposing.");

                closeAsync(new AmqpShutdownSignal(false, false,
                    "onReactorError: " + exception.toString()))
                    .subscribe();
            }
        }

        @Override
        void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionShutdown. Shutting down.");

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onConnectionShutdown: disposing.");

                closeAsync(shutdownSignal).subscribe();
            }
        }
    }

    /**
     * Pairs an {@link AmqpSession} with the subscription watching its endpoint states so both can
     * be disposed together, exactly once.
     */
    private static final class SessionSubscription {
        private final AtomicBoolean isDisposed = new AtomicBoolean();
        private final AmqpSession session;
        private final Disposable subscription;

        private SessionSubscription(AmqpSession session, Disposable subscription) {
            this.session = session;
            this.subscription = subscription;
        }

        private AmqpSession getSession() {
            return session;
        }

        private void dispose() {
            if (isDisposed.getAndSet(true)) {
                return;
            }

            // ReactorSession supports a graceful async close; other implementations only dispose().
            if (session instanceof ReactorSession) {
                ((ReactorSession) session).closeAsync("Closing session.", null, true)
                    .subscribe();
            } else {
                session.dispose();
            }

            subscription.dispose();
        }

        // Completes when the session has closed; non-ReactorSession instances complete immediately.
        private Mono<Void> isClosed() {
            if (session instanceof ReactorSession) {
                return ((ReactorSession) session).isClosed();
            } else {
                return Mono.empty();
            }
        }
    }
}
We should choose option 1 to make sure the CBS node is closed before the sessions are closed.
/**
 * Disposes of this connection in a fixed order: first the CBS node and the management nodes are
 * closed (in parallel, delaying errors), then the shutdown signal is emitted, then the reactor
 * teardown ({@code closeConnectionWork}) is scheduled on the dispatcher. The returned mono
 * completes when {@code isClosedMono} completes, i.e. once the connection is fully torn down.
 *
 * @param shutdownSignal Signal describing why the connection is being closed.
 * @return A mono that completes when the connection has finished closing.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Deferred via fromRunnable so the signal is only emitted when this step is subscribed to,
    // i.e. after the CBS and management nodes have been closed.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // It is possible that another one was already emitted, so it's all good.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    // Dispose the channel processor directly rather than waiting on a channel; during shutdown
    // there may be no active channel to wait for.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node created via getManagementNode, in parallel.
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // Prefer running closeConnectionWork on the reactor dispatcher thread; fall back to the
    // calling thread if scheduling fails or there is no dispatcher.
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);

                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                // Not logging the error here again because we have to log the exception when we throw it.
                logger.info("Could not schedule closeConnection work. Manually disposing.");

                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // Ordering: (CBS + management nodes) -> emit shutdown signal -> reactor teardown -> closed.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal. ")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose());
/**
 * Disposes of this connection in a fixed order: first the CBS node and the management nodes are
 * closed (in parallel, delaying errors), then the shutdown signal is emitted, then the reactor
 * teardown ({@code closeConnectionWork}) is scheduled on the dispatcher. The returned mono
 * completes when {@code isClosedMono} completes, i.e. once the connection is fully torn down.
 *
 * <p>Fixes over the previous implementation:</p>
 * <ul>
 * <li>The shutdown signal was emitted eagerly when this method was invoked — before the CBS and
 * management nodes were closed. It is now wrapped in {@link Mono#fromRunnable(Runnable)} and
 * sequenced after those close operations, so downstream consumers observe the signal only once
 * authorization channels are gone.</li>
 * <li>The CBS close used {@code cbsChannelProcessor.flatMap(channel -> channel.closeAsync())},
 * which waits on a channel from the processor and can stall (or trigger acquisition of a new
 * channel) during shutdown when none is active. The processor is now disposed directly.</li>
 * <li>CBS/management-node closing, signal emission, and reactor teardown are now explicitly
 * ordered via {@code whenDelayError(...).then(...).then(...)}.</li>
 * </ul>
 *
 * @param shutdownSignal Signal describing why the connection is being closed.
 * @return A mono that completes when the connection has finished closing.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Deferred so the signal is emitted only when this step is subscribed to, i.e. after the
    // CBS node and management nodes have been closed.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // It is possible that another signal was already emitted; log and continue.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    // Dispose the channel processor directly instead of waiting for a channel from it: during
    // shutdown there may be no active channel, and waiting could hang or create a new one.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node created via getManagementNode, in parallel.
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // Prefer running closeConnectionWork on the reactor dispatcher thread; fall back to the
    // calling thread if scheduling fails or there is no dispatcher.
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);

                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                logger.info("Could not schedule closeConnection work. Manually disposing.");

                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // Ordering: (CBS + management nodes, delaying errors) -> emit shutdown signal -> reactor
    // teardown -> wait for the connection-closed signal.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); }) 
.doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(() -> !this.isDisposed()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
If we put `emitShutDownSignal` ealier than `cbsCloseOperation`, because we add `.takeUntilOther(shutdownSignalSink.asMono())`, an "Cannot subscribe" exception will be thrown. Here we make `emitShutDownSignal` later than `cbsCloseOperation`. By this change, it will only request one extra AMQP channel (uninitialized) and then closed.
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } }); final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .log("Closed management nodes."))) .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Emitted connection shutdown signal. "))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
.then(emitShutDownSignalOperation.doFinally(signalType ->
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes."))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); }) 
.doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(() -> !this.isDisposed()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
Thanks, Kun — I can clearly see how this change helps us avoid repeated logs in the "explicit close" use case. We could split the connection-close modes into two: 1. Explicit closeAsync. 2. Implicit closeAsync. "Explicit close" is when the application "explicitly" invokes `closeAsync()`; at the time of such an "explicit close", the underlying amqp-connection will (very likely) be healthy, so the endpoint closure (links -> session -> connection) goes through seamlessly and quickly; this is the fast, happy path. The application is likely to finish execution after the explicit close. "Implicit close" is when the library internally invokes `closeAsync()`; this happens mainly in the recovery/self-healing route (where we have/had the race cases, receiver hangs, and many reliability issues). This is the route where the connection may be faulted, the dispatcher may reject the invoke, retries occur, etc. I was wondering: will there be any negative impact in the "implicit close" route if we change the order in which the shutdown signal is emitted? I want to make sure we think through the various control flows; if nothing is concerning, we're good. \\cc @conniey
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } }); final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes."))) .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Emitted connection shutdown signal. 
"))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
logger.atVerbose()
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes."))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat() .takeUntilOther(shutdownSignalSink.asMono()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(() -> !this.isDisposed()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
Thanks for telling me about the difference between explicit closeAsync and implicit closeAsync. It is worthwhile to think about various scenarios. I took a lot of time to think about my changes because they do change the existing closing order. I made this change for two reasons: 1. After adding `.takeUntilOther(shutdownSignalSink.asMono())`, I have to make sure the CBS channel is closed before the shutdown signal is emitted. 2. When we emit the shutdown signal, the ReactorSession may be closed earlier than the CBS channel, so the Close frame is sent earlier than the Detach frame, which causes the CBS channel to time out. The connection may then always close only after the 60-second timeout. This change solves that issue. For the case you mentioned, right, the change will postpone the closing of the ReactorReceiver(s). But I have a question: each connection can have its own CBS channel, and the old CBS channel's closure has no impact on the creation of a new one. But they are using the same ReactorReceiver, so the "old" ReactorReceiver's closure will block the "new" ReactorReceiver, right? I will check the implicit closeAsync; this part of the logic seems very interesting.
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } }); final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes."))) .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Emitted connection shutdown signal. 
"))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
logger.atVerbose()
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes."))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat() .takeUntilOther(shutdownSignalSink.asMono()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
/**
 * An AMQP connection backed by Qpid Proton-J. Owns the proton-j {@code Connection}/{@code Reactor} pair,
 * the sessions created on it, the claims-based-security (CBS) channel used for authorization, and any
 * per-entity management nodes. Close is coordinated through {@code shutdownSignalSink} and completes
 * {@code isClosedMono} once the underlying reactor work is done.
 */
class ReactorConnection implements AmqpConnection {
    // Well-known AMQP addresses/names for the CBS ($cbs) and management ($management) pseudo-entities.
    private static final String CBS_SESSION_NAME = "cbs-session";
    private static final String CBS_ADDRESS = "$cbs";
    private static final String CBS_LINK_NAME = "cbs";
    private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
    private static final String MANAGEMENT_ADDRESS = "$management";
    private static final String MANAGEMENT_LINK_NAME = "mgmt";

    private final ClientLogger logger;
    // Sessions keyed by session name; entries are removed (and disposed) when their endpoint stream errors/completes.
    private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>();
    // Management nodes keyed by entity path; populated lazily in getManagementNode.
    private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>();
    // Guards against double-dispose; set by whichever path (error, completion, explicit close) wins.
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Emits exactly one shutdown signal; downstream flows use it via takeUntilOther to stop work.
    private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one();
    private final Flux<AmqpEndpointState> endpointStates;
    // Completed when closeConnectionWork finishes; closeAsync() returns this for repeat callers.
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();

    private final String connectionId;
    private final Mono<Connection> connectionMono;
    private final ConnectionHandler handler;
    private final ReactorHandlerProvider handlerProvider;
    private final TokenManagerProvider tokenManagerProvider;
    private final MessageSerializer messageSerializer;
    private final ConnectionOptions connectionOptions;
    private final ReactorProvider reactorProvider;
    private final AmqpRetryPolicy retryPolicy;
    private final SenderSettleMode senderSettleMode;
    private final ReceiverSettleMode receiverSettleMode;
    private final Duration operationTimeout;
    private final Composite subscriptions;

    // Mutable connection state; created lazily in getOrCreateConnection (synchronized).
    private ReactorExecutor executor;
    private volatile ClaimsBasedSecurityChannel cbsChannel;
    private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor;
    private volatile Connection connection;

    /**
     * Creates a new AMQP connection that uses proton-j.
     *
     * @param connectionId Identifier for the connection.
     * @param connectionOptions A set of options used to create the AMQP connection.
     * @param reactorProvider Provides proton-j Reactor instances.
     * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
     * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node.
     * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}.
     * @param senderSettleMode to set as {@link SenderSettleMode} on sender.
     * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
     */
    public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider,
        ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider,
        MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
        ReceiverSettleMode receiverSettleMode) {
        this.connectionOptions = connectionOptions;
        this.reactorProvider = reactorProvider;
        this.connectionId = connectionId;
        this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId));
        this.handlerProvider = handlerProvider;
        this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider,
            "'tokenManagerProvider' cannot be null.");
        this.messageSerializer = messageSerializer;
        this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions);

        this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry());
        this.operationTimeout = connectionOptions.getRetry().getTryTimeout();
        this.senderSettleMode = senderSettleMode;
        this.receiverSettleMode = receiverSettleMode;

        // Lazily creates the underlying proton-j connection on first subscription, then waits for it to
        // become ACTIVE (bounded by the retry try-timeout). On failure the whole connection is torn down.
        this.connectionMono = Mono.fromCallable(this::getOrCreateConnection)
            .flatMap(reactorConnection -> {
                final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates()
                    .filter(state -> state == AmqpEndpointState.ACTIVE)
                    .next()
                    .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format(
                        "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId,
                        operationTimeout), handler.getErrorContext())));
                return activeEndpoint.thenReturn(reactorConnection);
            })
            .doOnError(error -> {
                if (isDisposed.getAndSet(true)) {
                    logger.verbose("Connection was already disposed: Error occurred while connection was starting.",
                        error);
                } else {
                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Error occurred while connection was starting. Error: " + error)).subscribe();
                }
            });

        // Connection-level endpoint states. Terminates as soon as a shutdown signal is emitted; an error or
        // natural completion of the handler's stream triggers closeAsync exactly once (guarded by isDisposed).
        // cache(1) replays the latest state to late subscribers.
        this.endpointStates = this.handler.getEndpointStates()
            .takeUntilOther(shutdownSignalSink.asMono())
            .map(state -> {
                logger.verbose("State {}", state);
                return AmqpEndpointStateUtil.getConnectionState(state);
            })
            .onErrorResume(error -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to error.");
                    return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage()))
                        .then(Mono.error(error));
                } else {
                    return Mono.error(error);
                }
            })
            .doOnComplete(() -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to connection close.");
                    closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe();
                }
            })
            .cache(1);

        // Eagerly subscribe so endpoint-state side effects (close-on-error/complete) run even with no
        // external subscribers; disposed in closeConnectionWork.
        this.subscriptions = Disposables.composite(this.endpointStates.subscribe());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Flux<AmqpEndpointState> getEndpointStates() {
        return endpointStates;
    }

    /**
     * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed.
     *
     * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is
     *     closed.
     */
    @Override
    public Flux<AmqpShutdownSignal> getShutdownSignals() {
        return shutdownSignalSink.asMono().cache().flux();
    }

    /**
     * Gets (or lazily creates) the management node for {@code entityPath}. Authorizes via CBS first, then
     * builds a {@link ManagementChannel} over a dedicated request/response channel.
     *
     * @param entityPath Entity whose management node is requested.
     * @return The management node, or an error if this connection is already disposed.
     */
    @Override
    public Mono<AmqpManagementNode> getManagementNode(String entityPath) {
        return Mono.defer(() -> {
            if (isDisposed()) {
                return Mono.error(logger.atError()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .log(Exceptions.propagate(new IllegalStateException(
                        "Connection is disposed. Cannot get management instance."))));
            }

            final AmqpManagementNode existing = managementNodes.get(entityPath);
            if (existing != null) {
                return Mono.just(existing);
            }

            final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(),
                connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope())
                .getTokenManager(getClaimsBasedSecurityNode(), entityPath);

            // authorize() first so the management link is created with a valid claim; compute() closes the
            // extra tokenManager if another caller won the race and the node already exists.
            return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> {
                if (current != null) {
                    logger.info("A management node exists already, returning it.");
                    // Close the token manager because we are not going to use it.
                    tokenManager.close();
                    return current;
                }

                final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
                final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
                final String address = entityPath + "/" + MANAGEMENT_ADDRESS;

                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .addKeyValue("address", address)
                    .log("Creating management node.");

                final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel =
                    createRequestResponseChannel(sessionName, linkName, address);
                return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath,
                    tokenManager);
            }));
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() {
        // Waits for the connection to be active before creating/returning the CBS node.
        return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode()));
    }

    @Override
    public String getId() {
        return connectionId;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getFullyQualifiedNamespace() {
        return handler.getHostname();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getMaxFrameSize() {
        return handler.getMaxFrameSize();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Map<String, Object> getConnectionProperties() {
        return handler.getConnectionProperties();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<AmqpSession> createSession(String sessionName) {
        return connectionMono.map(connection -> {
            // One session per name; the subscription removes the map entry when the session errors or
            // completes (unless the whole connection is already disposing).
            final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> {
                final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId,
                    getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout());
                final Session session = connection.session();

                BaseHandler.setHandler(session, sessionHandler);
                final AmqpSession amqpSession = createSession(key, session, sessionHandler);
                final Disposable subscription = amqpSession.getEndpointStates()
                    .subscribe(state -> {
                    }, error -> {
                        // If we were already disposing of the connection, the session would be removed.
                        if (isDisposed.get()) {
                            return;
                        }

                        logger.atInfo()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Error occurred. Removing and disposing session", error);
                        removeSession(key);
                    }, () -> {
                        // If we were already disposing of the connection, the session would be removed.
                        if (isDisposed.get()) {
                            return;
                        }

                        logger.atVerbose()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Complete. Removing and disposing session.");
                        removeSession(key);
                    });

                return new SessionSubscription(amqpSession, subscription);
            });

            return sessionSubscription;
        }).flatMap(sessionSubscription -> {
            // Do not hand the session to the caller until it reports ACTIVE (bounded by try-timeout).
            final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates()
                .filter(state -> state == AmqpEndpointState.ACTIVE)
                .next()
                .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true,
                    String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.",
                        connectionId, sessionName), handler.getErrorContext())));

            return activeSession.thenReturn(sessionSubscription.getSession());
        });
    }

    /**
     * Creates a new AMQP session with the given parameters.
     *
     * @param sessionName Name of the AMQP session.
     * @param session The reactor session associated with this session.
     * @param handler Session handler for the reactor session.
     *
     * @return A new instance of AMQP session.
     */
    protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
        return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider,
            getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean removeSession(String sessionName) {
        if (sessionName == null) {
            return false;
        }

        final SessionSubscription removed = sessionMap.remove(sessionName);

        if (removed != null) {
            removed.dispose();
        }

        return removed != null;
    }

    @Override
    public boolean isDisposed() {
        return isDisposed.get();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void dispose() {
        // Blocking wait bounded by twice the operation timeout so callers are not stuck forever.
        final Duration timeout = operationTimeout.plus(operationTimeout);
        closeAsync().block(timeout);
    }

    /**
     * Gets the active AMQP connection for this instance.
     *
     * @return The AMQP connection.
     *
     * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given
     *     {@link AmqpRetryOptions#getTryTimeout() operation timeout}.
     */
    protected Mono<Connection> getReactorConnection() {
        return connectionMono;
    }

    /**
     * Creates a bidirectional link between the message broker and the client.
     *
     * @param sessionName Name of the session.
     * @param linkName Name of the link.
     * @param entityPath Address to the message broker.
     *
     * @return A new {@link RequestResponseChannel} to communicate with the message broker.
     */
    protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName,
        String linkName, String entityPath) {
        Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");

        // Each repeat cycle creates a fresh channel when the previous one terminates.
        // NOTE(review): this variant gates recreation on !isDisposed() evaluated at repeat time; another
        // variant in this file uses .repeat().takeUntilOther(shutdownSignalSink.asMono()) instead, which
        // stops on the shutdown signal itself — confirm which termination condition is intended here.
        final Flux<RequestResponseChannel> createChannel = createSession(sessionName)
            .cast(ReactorSession.class)
            .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName,
                entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
                messageSerializer, senderSettleMode, receiverSettleMode))
            .doOnNext(e -> {
                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .log("Emitting new response channel.");
            })
            .repeat(() -> !this.isDisposed());

        Map<String, Object> loggingContext = createContextWithConnectionId(connectionId);
        loggingContext.put(ENTITY_PATH_KEY, entityPath);

        return createChannel
            .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(),
                channel -> channel.getEndpointStates(), retryPolicy, loggingContext));
    }

    @Override
    public Mono<Void> closeAsync() {
        if (isDisposed.getAndSet(true)) {
            logger.verbose("Connection was already closed. Not disposing again.");
            return isClosedMono.asMono();
        }

        return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client."));
    }

    /**
     * Runs on the reactor thread (or the caller when scheduling fails): closes the proton-j connection and
     * handler, then asynchronously waits for sessions and the executor to finish before completing
     * {@code isClosedMono} and disposing subscriptions.
     */
    private synchronized void closeConnectionWork() {
        if (connection == null) {
            isClosedMono.emitEmpty((signalType, emitResult) -> {
                addSignalTypeAndResult(logger.atInfo(), signalType, emitResult)
                    .log("Unable to complete closeMono.");

                return false;
            });

            return;
        }

        connection.close();
        handler.close();

        final ArrayList<Mono<Void>> closingSessions = new ArrayList<>();
        sessionMap.values().forEach(link -> closingSessions.add(link.isClosed()));

        // Close executor after all sessions are closed; synchronized(this) because 'executor' is also
        // touched by getOrCreateConnection.
        final Mono<Void> closedExecutor = executor != null ? Mono.defer(() -> {
            synchronized (this) {
                logger.info("Closing executor.");
                return executor.closeAsync();
            }
        }) : Mono.empty();

        // Bounded wait for session close; a timeout is logged and swallowed so shutdown still completes.
        final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions)
            .timeout(operationTimeout)
            .onErrorResume(error -> {
                logger.info("Timed out waiting for all sessions to close.");
                return Mono.empty();
            })
            .then(closedExecutor)
            .then(Mono.fromRunnable(() -> {
                isClosedMono.emitEmpty((signalType, result) -> {
                    addSignalTypeAndResult(logger.atWarning(), signalType, result)
                        .log("Unable to emit connection closed signal.");
                    return false;
                });

                subscriptions.dispose();
            }));

        subscriptions.add(closeSessionAndExecutorMono.subscribe());
    }

    // Lazily creates the CBS channel/node used for authorization; synchronized to guard cbsChannel/cbsChannelProcessor.
    private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
        if (cbsChannel == null) {
            logger.info("Setting CBS channel.");
            cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS);
            cbsChannel = new ClaimsBasedSecurityChannel(
                cbsChannelProcessor,
                connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(),
                connectionOptions.getRetry());
        }

        return cbsChannel;
    }

    // Lazily creates the proton-j Connection, its Reactor, and the ReactorExecutor pumping it; wires the
    // dispatcher's shutdown signal so the executor is closed on reactor shutdown or error.
    private synchronized Connection getOrCreateConnection() throws IOException {
        if (connection == null) {
            logger.atInfo()
                .addKeyValue(HOSTNAME_KEY, handler.getHostname())
                .addKeyValue("port", handler.getProtocolPort())
                .log("Creating and starting connection.");

            final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
            connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);

            final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler();

            // Cap the executor's pending-task wait at half the try-timeout (or SERVER_BUSY_WAIT_TIME if smaller).
            final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2);
            final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0
                ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided;
            final Scheduler scheduler = Schedulers.newSingle("reactor-executor");
            executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler,
                pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace());

            // Deferred + synchronized because 'executor' may be replaced/closed concurrently.
            final Mono<Void> executorCloseMono = Mono.defer(() -> {
                synchronized (this) {
                    return executor.closeAsync();
                }
            });
            reactorProvider.getReactorDispatcher().getShutdownSignal()
                .flatMap(signal -> {
                    reactorExceptionHandler.onConnectionShutdown(signal);
                    return executorCloseMono;
                })
                .onErrorResume(error -> {
                    reactorExceptionHandler.onConnectionError(error);
                    return executorCloseMono;
                })
                .subscribe();
            executor.start();
        }

        return connection;
    }

    /**
     * Routes reactor-level errors and shutdown signals into this connection's close path (at most once,
     * guarded by the shared isDisposed flag).
     */
    private final class ReactorExceptionHandler extends AmqpExceptionHandler {
        private ReactorExceptionHandler() {
            super();
        }

        @Override
        public void onConnectionError(Throwable exception) {
            logger.atInfo()
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionError, Starting new reactor", exception);

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onReactorError: Disposing.");

                closeAsync(new AmqpShutdownSignal(false, false,
                    "onReactorError: " + exception.toString()))
                    .subscribe();
            }
        }

        @Override
        void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionShutdown. Shutting down.");

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onConnectionShutdown: disposing.");

                closeAsync(shutdownSignal).subscribe();
            }
        }
    }

    /**
     * Pairs an {@link AmqpSession} with the subscription that watches its endpoint states, so both can be
     * disposed together exactly once.
     */
    private static final class SessionSubscription {
        private final AtomicBoolean isDisposed = new AtomicBoolean();
        private final AmqpSession session;
        private final Disposable subscription;

        private SessionSubscription(AmqpSession session, Disposable subscription) {
            this.session = session;
            this.subscription = subscription;
        }

        private AmqpSession getSession() {
            return session;
        }

        private void dispose() {
            if (isDisposed.getAndSet(true)) {
                return;
            }

            if (session instanceof ReactorSession) {
                // ReactorSession supports an async, graceful close; plain AmqpSession only has dispose().
                ((ReactorSession) session).closeAsync("Closing session.", null, true)
                    .subscribe();
            } else {
                session.dispose();
            }

            subscription.dispose();
        }

        private Mono<Void> isClosed() {
            if (session instanceof ReactorSession) {
                return ((ReactorSession) session).isClosed();
            } else {
                return Mono.empty();
            }
        }
    }
}
> they are using the same ReactorReceiver, so the "old" ReactorReceiver's closure will block the "new" ReactorReceiver, right? I read the RCA in #29212 and got the answer: the ReactorReceiver(s) are controlled by AmqpReceiveLinkProcessors, so if the old ReactorReceiver hasn't closed, the new one won't be created. Since a closing timeout will be added for ReactorReceiver in PR #29201, I am afraid ReactorReceiver may also have a timeout issue when it closes (the Detach frame could be sent later than the Close frame). So I am thinking about a way to emit the shutdown signals for ReactorSession and ReactorSender/ReactorReceiver separately. Then we can close ReactorSender/ReactorReceiver earlier.
/**
 * Closes this connection with the given shutdown signal.
 *
 * <p>Ordering matters here: the CBS channel and management nodes are closed first (gracefully, over the
 * still-open connection), then the shutdown signal is emitted, and only then is the reactor's
 * closeConnection work scheduled. The signal emission is wrapped in {@code Mono.fromRunnable} so it
 * happens on subscription, at its place in the chain — not eagerly when this method is invoked.</p>
 *
 * @param shutdownSignal Signal describing why the connection is closing; published to
 *     {@code shutdownSignalSink} for flows gated with {@code takeUntilOther}.
 * @return A mono that completes when the connection close work has finished ({@code isClosedMono}).
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Deferred emission: runs when subscribed, after the CBS/management close operations below.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // It's possible that another one was already emitted, so it's all good.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // Prefer running closeConnectionWork on the reactor dispatcher thread; fall back to the calling
    // thread if scheduling is impossible (dispatcher gone or rejecting work).
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);

                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                // Not logging error here again because we have to log the exception when we throw it.
                logger.info("Could not schedule closeConnection work. Manually disposing.");

                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError: attempt both graceful closes even if one errors; then emit the signal; then tear
    // down the reactor; finally complete when closeConnectionWork signals isClosedMono.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
logger.atVerbose()
/**
 * Closes this connection with the given shutdown signal.
 *
 * <p>Fix: the shutdown signal was previously emitted synchronously at method-invocation time — before the
 * returned mono was even subscribed, and before the CBS channel and management nodes were closed. Because
 * other flows stop on {@code shutdownSignalSink} (e.g. {@code takeUntilOther}), that premature emission
 * tore down the plumbing those graceful close operations still needed. The emission is now wrapped in
 * {@code Mono.fromRunnable} and sequenced after the CBS/management close operations, and only then is the
 * reactor's closeConnection work scheduled.</p>
 *
 * @param shutdownSignal Signal describing why the connection is closing; published to
 *     {@code shutdownSignalSink}.
 * @return A mono that completes when the connection close work has finished ({@code isClosedMono}).
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Deferred emission: runs on subscription, at its position in the chain below.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // Another signal may already have been emitted; that is fine.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // Prefer the reactor dispatcher thread for closeConnectionWork; fall back to the calling thread if
    // the dispatcher is gone or rejects the work.
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);

                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                logger.info("Could not schedule closeConnection work. Manually disposing.");

                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // Graceful closes first (delaying errors so both are attempted), then emit the shutdown signal, then
    // tear down the reactor; complete when closeConnectionWork signals isClosedMono.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat() .takeUntilOther(shutdownSignalSink.asMono()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(() -> !this.isDisposed()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
> I read the RCA in https://github.com/Azure/azure-sdk-for-java/issues/29212 and got the answer: the ReactorReceiver(s) are controlled by AmqpReceiveLinkProcessors; if the old "ReactorReceiver" hasn't closed, the new one won't be created. Yep, that's right. > I am afraid that ReactorReceiver also has the timeout issue when it is closed (the Detach frame is sent later than the Close frame). If we look at ReactorSession, we can see multiple routes by which it gets disposed/closed. In all routes except one, the ReactorSession will send CLOSE only after the child ReactorReceiver(s) close, meaning it gives the ReactorReceiver(s) a chance to send DETACH. The only route where ReactorSession sends CLOSE without waiting for the ReactorReceiver(s) DETACH is when the Connection sends the shutdown signal to the ReactorSession. I think this is an optimization, given that doing so will be considered an implicit disposal of the links under the session on the broker side (less traffic). I'm working on a PR to have no wait time in this particular case, but it will still make sure to terminate the endpoint state (the issue we fixed in [29201](https://github.com/Azure/azure-sdk-for-java/pull/29201)); hence AmqpReceiveLinkProcessor gets to react.
/**
 * Closes the connection for the given shutdown signal. The sequence is: close the CBS and
 * management channels (in parallel, delaying errors), then emit the shutdown signal to
 * listeners, then schedule the connection tear-down ({@code closeConnectionWork}) on the
 * reactor dispatcher.
 *
 * @param shutdownSignal Describes why the connection is being closed.
 * @return A Mono that completes when {@code isClosedMono} completes, i.e. after
 *     {@code closeConnectionWork()} has finished disposing the connection.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Deferred via Mono.fromRunnable so the signal is emitted at subscription time, in sequence
    // after the CBS/management channels have closed, rather than eagerly when this method is
    // invoked.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // Non-fatal: the one-shot sink may already hold a value from an earlier shutdown.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    // CBS channel is created lazily; skip the close when it was never opened.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node that was created for an entity path.
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                // Dispatcher could not schedule the work; fall back to closing on this thread.
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                // Dispatcher already shut down; fall back to closing on this thread.
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError: run both channel closes even if one errors; then emit the shutdown signal,
    // then tear down the reactor, and finally complete when isClosedMono completes.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
logger.atVerbose()
/**
 * Closes the connection for the given shutdown signal. The sequence is: close the CBS and
 * management channels (in parallel, delaying errors), then emit the shutdown signal to
 * listeners, then schedule the connection tear-down ({@code closeConnectionWork}) on the
 * reactor dispatcher.
 *
 * @param shutdownSignal Describes why the connection is being closed.
 * @return A Mono that completes when {@code isClosedMono} completes, i.e. after
 *     {@code closeConnectionWork()} has finished disposing the connection.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // FIX: previously the shutdown signal was emitted eagerly, at the moment this method was
    // invoked - before (and regardless of whether) the returned Mono was subscribed, and before
    // the CBS/management channels closed. Wrapping the emission in Mono.fromRunnable defers it
    // to subscription time and sequences it into the close chain below.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // Non-fatal: the one-shot sink may already hold a value from an earlier shutdown.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    // CBS channel is created lazily; skip the close when it was never opened.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node that was created for an entity path.
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                // Dispatcher could not schedule the work; fall back to closing on this thread.
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                // Dispatcher already shut down; fall back to closing on this thread.
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError: run both channel closes even if one errors; then emit the shutdown signal,
    // then tear down the reactor, and finally complete when isClosedMono completes.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
/**
 * An AMQP connection implemented on top of the Apache Proton-J reactor.
 * <p>
 * Owns the proton-j {@code Connection}, its {@code ReactorExecutor} pump, the CBS (claims-based security)
 * channel used for authorization, per-entity management nodes, and the set of AMQP sessions created from it.
 * Shutdown is signalled through {@code shutdownSignalSink}; {@code isClosedMono} completes once teardown in
 * {@code closeConnectionWork()} finishes.
 */
class ReactorConnection implements AmqpConnection {
    private static final String CBS_SESSION_NAME = "cbs-session";
    private static final String CBS_ADDRESS = "$cbs";
    private static final String CBS_LINK_NAME = "cbs";

    private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
    private static final String MANAGEMENT_ADDRESS = "$management";
    private static final String MANAGEMENT_LINK_NAME = "mgmt";

    private final ClientLogger logger;
    // Sessions keyed by session name; SessionSubscription couples a session with its endpoint-state subscription.
    private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>();
    // Management nodes keyed by entity path, created lazily in getManagementNode.
    private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>();

    // Guards against double-dispose; flipped by closeAsync and the endpoint-state error/complete handlers.
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Emits exactly one shutdown signal; used by getShutdownSignals() and to cut off downstream fluxes.
    private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one();
    private final Flux<AmqpEndpointState> endpointStates;
    // Completes when closeConnectionWork() has fully torn down the connection.
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();

    private final String connectionId;
    // Lazily creates the proton-j connection and waits for it to become ACTIVE before emitting it.
    private final Mono<Connection> connectionMono;
    private final ConnectionHandler handler;
    private final ReactorHandlerProvider handlerProvider;
    private final TokenManagerProvider tokenManagerProvider;
    private final MessageSerializer messageSerializer;
    private final ConnectionOptions connectionOptions;
    private final ReactorProvider reactorProvider;
    private final AmqpRetryPolicy retryPolicy;
    private final SenderSettleMode senderSettleMode;
    private final ReceiverSettleMode receiverSettleMode;
    private final Duration operationTimeout;
    private final Composite subscriptions;

    // Created together with the proton-j connection in getOrCreateConnection(); accessed under 'synchronized'.
    private ReactorExecutor executor;

    private volatile ClaimsBasedSecurityChannel cbsChannel;
    private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor;
    private volatile Connection connection;

    /**
     * Creates a new AMQP connection that uses proton-j.
     *
     * @param connectionId Identifier for the connection.
     * @param connectionOptions A set of options used to create the AMQP connection.
     * @param reactorProvider Provides proton-j Reactor instances.
     * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
     * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node.
     * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}.
     * @param senderSettleMode to set as {@link SenderSettleMode} on sender.
     * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
     */
    public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider,
        ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider,
        MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
        ReceiverSettleMode receiverSettleMode) {
        this.connectionOptions = connectionOptions;
        this.reactorProvider = reactorProvider;
        this.connectionId = connectionId;
        this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId));
        this.handlerProvider = handlerProvider;
        this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider,
            "'tokenManagerProvider' cannot be null.");
        this.messageSerializer = messageSerializer;
        this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions);

        this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry());
        this.operationTimeout = connectionOptions.getRetry().getTryTimeout();
        this.senderSettleMode = senderSettleMode;
        this.receiverSettleMode = receiverSettleMode;

        // Emits the connection only after its endpoint reaches ACTIVE; otherwise times out with a retriable
        // AmqpException and disposes the half-open connection in doOnError.
        this.connectionMono = Mono.fromCallable(this::getOrCreateConnection)
            .flatMap(reactorConnection -> {
                final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates()
                    .filter(state -> state == AmqpEndpointState.ACTIVE)
                    .next()
                    .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format(
                        "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId,
                        operationTimeout), handler.getErrorContext())));
                return activeEndpoint.thenReturn(reactorConnection);
            })
            .doOnError(error -> {
                if (isDisposed.getAndSet(true)) {
                    logger.verbose("Connection was already disposed: Error occurred while connection was starting.",
                        error);
                } else {
                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Error occurred while connection was starting. Error: " + error)).subscribe();
                }
            });

        // Endpoint states from the connection handler; terminated by the shutdown signal, and any
        // error/completion triggers disposal of the whole connection. cache(1) replays the last state.
        this.endpointStates = this.handler.getEndpointStates()
            .takeUntilOther(shutdownSignalSink.asMono())
            .map(state -> {
                logger.verbose("State {}", state);
                return AmqpEndpointStateUtil.getConnectionState(state);
            })
            .onErrorResume(error -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to error.");
                    return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error));
                } else {
                    return Mono.error(error);
                }
            })
            .doOnComplete(() -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to connection close.");
                    closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe();
                }
            })
            .cache(1);

        this.subscriptions = Disposables.composite(this.endpointStates.subscribe());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Flux<AmqpEndpointState> getEndpointStates() {
        return endpointStates;
    }

    /**
     * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed.
     *
     * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is
     *     closed.
     */
    @Override
    public Flux<AmqpShutdownSignal> getShutdownSignals() {
        return shutdownSignalSink.asMono().cache().flux();
    }

    @Override
    public Mono<AmqpManagementNode> getManagementNode(String entityPath) {
        return Mono.defer(() -> {
            if (isDisposed()) {
                return Mono.error(logger.atError()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .log(Exceptions.propagate(new IllegalStateException(
                        "Connection is disposed. Cannot get management instance."))));
            }

            final AmqpManagementNode existing = managementNodes.get(entityPath);
            if (existing != null) {
                return Mono.just(existing);
            }

            // Authorize against the CBS node first, then create (or reuse) the management node atomically.
            final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(),
                connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope())
                .getTokenManager(getClaimsBasedSecurityNode(), entityPath);

            return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> {
                if (current != null) {
                    // Lost the race with another caller; close the redundant token manager.
                    logger.info("A management node exists already, returning it.");
                    tokenManager.close();
                    return current;
                }

                final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
                final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
                final String address = entityPath + "/" + MANAGEMENT_ADDRESS;

                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .addKeyValue("address", address)
                    .log("Creating management node.");

                final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel =
                    createRequestResponseChannel(sessionName, linkName, address);
                return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath,
                    tokenManager);
            }));
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() {
        return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode()));
    }

    @Override
    public String getId() {
        return connectionId;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getFullyQualifiedNamespace() {
        return handler.getHostname();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getMaxFrameSize() {
        return handler.getMaxFrameSize();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Map<String, Object> getConnectionProperties() {
        return handler.getConnectionProperties();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<AmqpSession> createSession(String sessionName) {
        return connectionMono.map(connection -> {
            // One session per name; the subscription removes the session from the map on error/completion.
            final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> {
                final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId,
                    getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout());
                final Session session = connection.session();

                BaseHandler.setHandler(session, sessionHandler);
                final AmqpSession amqpSession = createSession(key, session, sessionHandler);
                final Disposable subscription = amqpSession.getEndpointStates()
                    .subscribe(state -> {
                    }, error -> {
                        // If we were already disposed, the session map is being torn down elsewhere.
                        if (isDisposed.get()) {
                            return;
                        }
                        logger.atInfo()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Error occurred. Removing and disposing session", error);
                        removeSession(key);
                    }, () -> {
                        if (isDisposed.get()) {
                            return;
                        }
                        logger.atVerbose()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Complete. Removing and disposing session.");
                        removeSession(key);
                    });

                return new SessionSubscription(amqpSession, subscription);
            });

            return sessionSubscription;
        }).flatMap(sessionSubscription -> {
            // Only emit the session once it is ACTIVE, or fail with a retriable timeout.
            final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates()
                .filter(state -> state == AmqpEndpointState.ACTIVE)
                .next()
                .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true,
                    String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.",
                        connectionId, sessionName), handler.getErrorContext())));

            return activeSession.thenReturn(sessionSubscription.getSession());
        });
    }

    /**
     * Creates a new AMQP session with the given parameters.
     *
     * @param sessionName Name of the AMQP session.
     * @param session The reactor session associated with this session.
     * @param handler Session handler for the reactor session.
     *
     * @return A new instance of AMQP session.
     */
    protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
        return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider,
            getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean removeSession(String sessionName) {
        if (sessionName == null) {
            return false;
        }

        final SessionSubscription removed = sessionMap.remove(sessionName);
        if (removed != null) {
            removed.dispose();
        }

        return removed != null;
    }

    @Override
    public boolean isDisposed() {
        return isDisposed.get();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void dispose() {
        // Block for up to 2x the operation timeout to allow the async close to finish.
        final Duration timeout = operationTimeout.plus(operationTimeout);
        closeAsync().block(timeout);
    }

    /**
     * Gets the active AMQP connection for this instance.
     *
     * @return The AMQP connection.
     *
     * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given
     *     {@link AmqpRetryOptions}.
     */
    protected Mono<Connection> getReactorConnection() {
        return connectionMono;
    }

    /**
     * Creates a bidirectional link between the message broker and the client.
     *
     * @param sessionName Name of the session.
     * @param linkName Name of the link.
     * @param entityPath Address to the message broker.
     *
     * @return A new {@link RequestResponseChannel} to communicate with the message broker.
     */
    protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName,
        String linkName, String entityPath) {
        Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");

        // repeat() recreates the channel every time one completes; takeUntilOther stops that recreation loop once
        // the connection's shutdown signal is emitted.
        final Flux<RequestResponseChannel> createChannel = createSession(sessionName)
            .cast(ReactorSession.class)
            .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName,
                entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
                messageSerializer, senderSettleMode, receiverSettleMode))
            .doOnNext(e -> {
                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .log("Emitting new response channel.");
            })
            .repeat()
            .takeUntilOther(shutdownSignalSink.asMono());

        Map<String, Object> loggingContext = createContextWithConnectionId(connectionId);
        loggingContext.put(ENTITY_PATH_KEY, entityPath);

        return createChannel
            .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(),
                channel -> channel.getEndpointStates(), retryPolicy, loggingContext));
    }

    @Override
    public Mono<Void> closeAsync() {
        if (isDisposed.getAndSet(true)) {
            logger.verbose("Connection was already closed. Not disposing again.");
            return isClosedMono.asMono();
        }

        return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client."));
    }

    // Tears down the proton-j connection. Expected to run on the reactor thread (scheduled via the dispatcher
    // in closeAsync(AmqpShutdownSignal)); synchronized because it touches 'connection' and 'executor'.
    private synchronized void closeConnectionWork() {
        if (connection == null) {
            // Nothing was ever opened; just complete the closed signal.
            isClosedMono.emitEmpty((signalType, emitResult) -> {
                addSignalTypeAndResult(logger.atInfo(), signalType, emitResult)
                    .log("Unable to complete closeMono.");
                return false;
            });
            return;
        }

        connection.close();
        handler.close();

        final ArrayList<Mono<Void>> closingSessions = new ArrayList<>();
        sessionMap.values().forEach(link -> closingSessions.add(link.isClosed()));

        // Re-acquire the monitor inside defer: the executor is closed later, from a reactive callback.
        final Mono<Void> closedExecutor = executor != null ? Mono.defer(() -> {
            synchronized (this) {
                logger.info("Closing executor.");
                return executor.closeAsync();
            }
        }) : Mono.empty();

        // Wait (bounded) for sessions to close, then the executor, then complete isClosedMono and drop all
        // remaining subscriptions.
        final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions)
            .timeout(operationTimeout)
            .onErrorResume(error -> {
                logger.info("Timed out waiting for all sessions to close.");
                return Mono.empty();
            })
            .then(closedExecutor)
            .then(Mono.fromRunnable(() -> {
                isClosedMono.emitEmpty((signalType, result) -> {
                    addSignalTypeAndResult(logger.atWarning(), signalType, result)
                        .log("Unable to emit connection closed signal.");
                    return false;
                });

                subscriptions.dispose();
            }));

        subscriptions.add(closeSessionAndExecutorMono.subscribe());
    }

    // Lazily creates the CBS channel + node; synchronized so only one channel is ever created.
    private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
        if (cbsChannel == null) {
            logger.info("Setting CBS channel.");
            cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS);
            cbsChannel = new ClaimsBasedSecurityChannel(
                cbsChannelProcessor,
                connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(),
                connectionOptions.getRetry());
        }

        return cbsChannel;
    }

    // Lazily creates the proton-j reactor, connection, and executor pump; synchronized for single creation.
    private synchronized Connection getOrCreateConnection() throws IOException {
        if (connection == null) {
            logger.atInfo()
                .addKeyValue(HOSTNAME_KEY, handler.getHostname())
                .addKeyValue("port", handler.getProtocolPort())
                .log("Creating and starting connection.");

            final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
            connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);

            final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler();

            // Wait time for pending tasks: the smaller of SERVER_BUSY_WAIT_TIME and half the try timeout.
            final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2);
            final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0
                ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided;
            final Scheduler scheduler = Schedulers.newSingle("reactor-executor");
            executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler,
                pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace());

            final Mono<Void> executorCloseMono = Mono.defer(() -> {
                synchronized (this) {
                    return executor.closeAsync();
                }
            });

            // When the dispatcher shuts down (normally or with an error), notify the exception handler and
            // close the executor.
            reactorProvider.getReactorDispatcher().getShutdownSignal()
                .flatMap(signal -> {
                    reactorExceptionHandler.onConnectionShutdown(signal);
                    return executorCloseMono;
                })
                .onErrorResume(error -> {
                    reactorExceptionHandler.onConnectionError(error);
                    return executorCloseMono;
                })
                .subscribe();

            executor.start();
        }

        return connection;
    }

    // Bridges reactor-level errors/shutdowns into disposal of this connection.
    private final class ReactorExceptionHandler extends AmqpExceptionHandler {
        private ReactorExceptionHandler() {
            super();
        }

        @Override
        public void onConnectionError(Throwable exception) {
            logger.atInfo()
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionError, Starting new reactor", exception);

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onReactorError: Disposing.");

                closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString()))
                    .subscribe();
            }
        }

        @Override
        void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionShutdown. Shutting down.");

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onConnectionShutdown: disposing.");

                closeAsync(shutdownSignal).subscribe();
            }
        }
    }

    // Pairs an AmqpSession with the subscription watching its endpoint states, so both can be disposed together.
    private static final class SessionSubscription {
        private final AtomicBoolean isDisposed = new AtomicBoolean();
        private final AmqpSession session;
        private final Disposable subscription;

        private SessionSubscription(AmqpSession session, Disposable subscription) {
            this.session = session;
            this.subscription = subscription;
        }

        private AmqpSession getSession() {
            return session;
        }

        private void dispose() {
            if (isDisposed.getAndSet(true)) {
                return;
            }

            // ReactorSession supports a graceful async close; other implementations are disposed synchronously.
            if (session instanceof ReactorSession) {
                ((ReactorSession) session).closeAsync("Closing session.", null, true)
                    .subscribe();
            } else {
                session.dispose();
            }

            subscription.dispose();
        }

        private Mono<Void> isClosed() {
            if (session instanceof ReactorSession) {
                return ((ReactorSession) session).isClosed();
            } else {
                return Mono.empty();
            }
        }
    }
}
/**
 * An AMQP connection implemented on top of the Apache Proton-J reactor.
 * <p>
 * Owns the proton-j {@code Connection}, its {@code ReactorExecutor} pump, the CBS (claims-based security)
 * channel used for authorization, per-entity management nodes, and the set of AMQP sessions created from it.
 * Shutdown is signalled through {@code shutdownSignalSink}; {@code isClosedMono} completes once teardown in
 * {@code closeConnectionWork()} finishes.
 */
class ReactorConnection implements AmqpConnection {
    private static final String CBS_SESSION_NAME = "cbs-session";
    private static final String CBS_ADDRESS = "$cbs";
    private static final String CBS_LINK_NAME = "cbs";

    private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
    private static final String MANAGEMENT_ADDRESS = "$management";
    private static final String MANAGEMENT_LINK_NAME = "mgmt";

    private final ClientLogger logger;
    // Sessions keyed by session name; SessionSubscription couples a session with its endpoint-state subscription.
    private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>();
    // Management nodes keyed by entity path, created lazily in getManagementNode.
    private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>();

    // Guards against double-dispose; flipped by closeAsync and the endpoint-state error/complete handlers.
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Emits exactly one shutdown signal; used by getShutdownSignals() and to cut off downstream fluxes.
    private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one();
    private final Flux<AmqpEndpointState> endpointStates;
    // Completes when closeConnectionWork() has fully torn down the connection.
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();

    private final String connectionId;
    // Lazily creates the proton-j connection and waits for it to become ACTIVE before emitting it.
    private final Mono<Connection> connectionMono;
    private final ConnectionHandler handler;
    private final ReactorHandlerProvider handlerProvider;
    private final TokenManagerProvider tokenManagerProvider;
    private final MessageSerializer messageSerializer;
    private final ConnectionOptions connectionOptions;
    private final ReactorProvider reactorProvider;
    private final AmqpRetryPolicy retryPolicy;
    private final SenderSettleMode senderSettleMode;
    private final ReceiverSettleMode receiverSettleMode;
    private final Duration operationTimeout;
    private final Composite subscriptions;

    // Created together with the proton-j connection in getOrCreateConnection(); accessed under 'synchronized'.
    private ReactorExecutor executor;

    private volatile ClaimsBasedSecurityChannel cbsChannel;
    private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor;
    private volatile Connection connection;

    /**
     * Creates a new AMQP connection that uses proton-j.
     *
     * @param connectionId Identifier for the connection.
     * @param connectionOptions A set of options used to create the AMQP connection.
     * @param reactorProvider Provides proton-j Reactor instances.
     * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
     * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node.
     * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}.
     * @param senderSettleMode to set as {@link SenderSettleMode} on sender.
     * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
     */
    public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider,
        ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider,
        MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
        ReceiverSettleMode receiverSettleMode) {
        this.connectionOptions = connectionOptions;
        this.reactorProvider = reactorProvider;
        this.connectionId = connectionId;
        this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId));
        this.handlerProvider = handlerProvider;
        this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider,
            "'tokenManagerProvider' cannot be null.");
        this.messageSerializer = messageSerializer;
        this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions);

        this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry());
        this.operationTimeout = connectionOptions.getRetry().getTryTimeout();
        this.senderSettleMode = senderSettleMode;
        this.receiverSettleMode = receiverSettleMode;

        // Emits the connection only after its endpoint reaches ACTIVE; otherwise times out with a retriable
        // AmqpException and disposes the half-open connection in doOnError.
        this.connectionMono = Mono.fromCallable(this::getOrCreateConnection)
            .flatMap(reactorConnection -> {
                final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates()
                    .filter(state -> state == AmqpEndpointState.ACTIVE)
                    .next()
                    .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format(
                        "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId,
                        operationTimeout), handler.getErrorContext())));
                return activeEndpoint.thenReturn(reactorConnection);
            })
            .doOnError(error -> {
                if (isDisposed.getAndSet(true)) {
                    logger.verbose("Connection was already disposed: Error occurred while connection was starting.",
                        error);
                } else {
                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Error occurred while connection was starting. Error: " + error)).subscribe();
                }
            });

        // Endpoint states from the connection handler; terminated by the shutdown signal, and any
        // error/completion triggers disposal of the whole connection. cache(1) replays the last state.
        this.endpointStates = this.handler.getEndpointStates()
            .takeUntilOther(shutdownSignalSink.asMono())
            .map(state -> {
                logger.verbose("State {}", state);
                return AmqpEndpointStateUtil.getConnectionState(state);
            })
            .onErrorResume(error -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to error.");
                    return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error));
                } else {
                    return Mono.error(error);
                }
            })
            .doOnComplete(() -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to connection close.");
                    closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe();
                }
            })
            .cache(1);

        this.subscriptions = Disposables.composite(this.endpointStates.subscribe());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Flux<AmqpEndpointState> getEndpointStates() {
        return endpointStates;
    }

    /**
     * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed.
     *
     * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is
     *     closed.
     */
    @Override
    public Flux<AmqpShutdownSignal> getShutdownSignals() {
        return shutdownSignalSink.asMono().cache().flux();
    }

    @Override
    public Mono<AmqpManagementNode> getManagementNode(String entityPath) {
        return Mono.defer(() -> {
            if (isDisposed()) {
                return Mono.error(logger.atError()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .log(Exceptions.propagate(new IllegalStateException(
                        "Connection is disposed. Cannot get management instance."))));
            }

            final AmqpManagementNode existing = managementNodes.get(entityPath);
            if (existing != null) {
                return Mono.just(existing);
            }

            // Authorize against the CBS node first, then create (or reuse) the management node atomically.
            final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(),
                connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope())
                .getTokenManager(getClaimsBasedSecurityNode(), entityPath);

            return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> {
                if (current != null) {
                    // Lost the race with another caller; close the redundant token manager.
                    logger.info("A management node exists already, returning it.");
                    tokenManager.close();
                    return current;
                }

                final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
                final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
                final String address = entityPath + "/" + MANAGEMENT_ADDRESS;

                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .addKeyValue("address", address)
                    .log("Creating management node.");

                final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel =
                    createRequestResponseChannel(sessionName, linkName, address);
                return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath,
                    tokenManager);
            }));
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() {
        return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode()));
    }

    @Override
    public String getId() {
        return connectionId;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getFullyQualifiedNamespace() {
        return handler.getHostname();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getMaxFrameSize() {
        return handler.getMaxFrameSize();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Map<String, Object> getConnectionProperties() {
        return handler.getConnectionProperties();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<AmqpSession> createSession(String sessionName) {
        return connectionMono.map(connection -> {
            // One session per name; the subscription removes the session from the map on error/completion.
            final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> {
                final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId,
                    getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout());
                final Session session = connection.session();

                BaseHandler.setHandler(session, sessionHandler);
                final AmqpSession amqpSession = createSession(key, session, sessionHandler);
                final Disposable subscription = amqpSession.getEndpointStates()
                    .subscribe(state -> {
                    }, error -> {
                        // If we were already disposed, the session map is being torn down elsewhere.
                        if (isDisposed.get()) {
                            return;
                        }
                        logger.atInfo()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Error occurred. Removing and disposing session", error);
                        removeSession(key);
                    }, () -> {
                        if (isDisposed.get()) {
                            return;
                        }
                        logger.atVerbose()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Complete. Removing and disposing session.");
                        removeSession(key);
                    });

                return new SessionSubscription(amqpSession, subscription);
            });

            return sessionSubscription;
        }).flatMap(sessionSubscription -> {
            // Only emit the session once it is ACTIVE, or fail with a retriable timeout.
            final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates()
                .filter(state -> state == AmqpEndpointState.ACTIVE)
                .next()
                .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true,
                    String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.",
                        connectionId, sessionName), handler.getErrorContext())));

            return activeSession.thenReturn(sessionSubscription.getSession());
        });
    }

    /**
     * Creates a new AMQP session with the given parameters.
     *
     * @param sessionName Name of the AMQP session.
     * @param session The reactor session associated with this session.
     * @param handler Session handler for the reactor session.
     *
     * @return A new instance of AMQP session.
     */
    protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
        return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider,
            getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean removeSession(String sessionName) {
        if (sessionName == null) {
            return false;
        }

        final SessionSubscription removed = sessionMap.remove(sessionName);
        if (removed != null) {
            removed.dispose();
        }

        return removed != null;
    }

    @Override
    public boolean isDisposed() {
        return isDisposed.get();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void dispose() {
        // Block for up to 2x the operation timeout to allow the async close to finish.
        final Duration timeout = operationTimeout.plus(operationTimeout);
        closeAsync().block(timeout);
    }

    /**
     * Gets the active AMQP connection for this instance.
     *
     * @return The AMQP connection.
     *
     * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given
     *     {@link AmqpRetryOptions}.
     */
    protected Mono<Connection> getReactorConnection() {
        return connectionMono;
    }

    /**
     * Creates a bidirectional link between the message broker and the client.
     *
     * @param sessionName Name of the session.
     * @param linkName Name of the link.
     * @param entityPath Address to the message broker.
     *
     * @return A new {@link RequestResponseChannel} to communicate with the message broker.
     */
    protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName,
        String linkName, String entityPath) {
        Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");

        // repeat(predicate) recreates the channel every time one completes, but only while this connection has
        // not been disposed — this bounds the recreation loop during shutdown.
        final Flux<RequestResponseChannel> createChannel = createSession(sessionName)
            .cast(ReactorSession.class)
            .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName,
                entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
                messageSerializer, senderSettleMode, receiverSettleMode))
            .doOnNext(e -> {
                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .log("Emitting new response channel.")
            })
            .repeat(() -> !this.isDisposed());

        Map<String, Object> loggingContext = createContextWithConnectionId(connectionId);
        loggingContext.put(ENTITY_PATH_KEY, entityPath);

        return createChannel
            .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(),
                channel -> channel.getEndpointStates(), retryPolicy, loggingContext));
    }

    @Override
    public Mono<Void> closeAsync() {
        if (isDisposed.getAndSet(true)) {
            logger.verbose("Connection was already closed. Not disposing again.");
            return isClosedMono.asMono();
        }

        return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client."));
    }

    // Tears down the proton-j connection. Expected to run on the reactor thread (scheduled via the dispatcher
    // in closeAsync(AmqpShutdownSignal)); synchronized because it touches 'connection' and 'executor'.
    private synchronized void closeConnectionWork() {
        if (connection == null) {
            // Nothing was ever opened; just complete the closed signal.
            isClosedMono.emitEmpty((signalType, emitResult) -> {
                addSignalTypeAndResult(logger.atInfo(), signalType, emitResult)
                    .log("Unable to complete closeMono.");
                return false;
            });
            return;
        }

        connection.close();
        handler.close();

        final ArrayList<Mono<Void>> closingSessions = new ArrayList<>();
        sessionMap.values().forEach(link -> closingSessions.add(link.isClosed()));

        // Re-acquire the monitor inside defer: the executor is closed later, from a reactive callback.
        final Mono<Void> closedExecutor = executor != null ? Mono.defer(() -> {
            synchronized (this) {
                logger.info("Closing executor.");
                return executor.closeAsync();
            }
        }) : Mono.empty();

        // Wait (bounded) for sessions to close, then the executor, then complete isClosedMono and drop all
        // remaining subscriptions.
        final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions)
            .timeout(operationTimeout)
            .onErrorResume(error -> {
                logger.info("Timed out waiting for all sessions to close.");
                return Mono.empty();
            })
            .then(closedExecutor)
            .then(Mono.fromRunnable(() -> {
                isClosedMono.emitEmpty((signalType, result) -> {
                    addSignalTypeAndResult(logger.atWarning(), signalType, result)
                        .log("Unable to emit connection closed signal.");
                    return false;
                });

                subscriptions.dispose();
            }));

        subscriptions.add(closeSessionAndExecutorMono.subscribe());
    }

    // Lazily creates the CBS channel + node; synchronized so only one channel is ever created.
    private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
        if (cbsChannel == null) {
            logger.info("Setting CBS channel.");
            cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS);
            cbsChannel = new ClaimsBasedSecurityChannel(
                cbsChannelProcessor,
                connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(),
                connectionOptions.getRetry());
        }

        return cbsChannel;
    }

    // Lazily creates the proton-j reactor, connection, and executor pump; synchronized for single creation.
    private synchronized Connection getOrCreateConnection() throws IOException {
        if (connection == null) {
            logger.atInfo()
                .addKeyValue(HOSTNAME_KEY, handler.getHostname())
                .addKeyValue("port", handler.getProtocolPort())
                .log("Creating and starting connection.");

            final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
            connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);

            final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler();

            // Wait time for pending tasks: the smaller of SERVER_BUSY_WAIT_TIME and half the try timeout.
            final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2);
            final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0
                ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided;
            final Scheduler scheduler = Schedulers.newSingle("reactor-executor");
            executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler,
                pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace());

            final Mono<Void> executorCloseMono = Mono.defer(() -> {
                synchronized (this) {
                    return executor.closeAsync();
                }
            });

            // When the dispatcher shuts down (normally or with an error), notify the exception handler and
            // close the executor.
            reactorProvider.getReactorDispatcher().getShutdownSignal()
                .flatMap(signal -> {
                    reactorExceptionHandler.onConnectionShutdown(signal);
                    return executorCloseMono;
                })
                .onErrorResume(error -> {
                    reactorExceptionHandler.onConnectionError(error);
                    return executorCloseMono;
                })
                .subscribe();

            executor.start();
        }

        return connection;
    }

    // Bridges reactor-level errors/shutdowns into disposal of this connection.
    private final class ReactorExceptionHandler extends AmqpExceptionHandler {
        private ReactorExceptionHandler() {
            super();
        }

        @Override
        public void onConnectionError(Throwable exception) {
            logger.atInfo()
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionError, Starting new reactor", exception);

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onReactorError: Disposing.");

                closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString()))
                    .subscribe();
            }
        }

        @Override
        void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionShutdown. Shutting down.");

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onConnectionShutdown: disposing.");

                closeAsync(shutdownSignal).subscribe();
            }
        }
    }

    // Pairs an AmqpSession with the subscription watching its endpoint states, so both can be disposed together.
    private static final class SessionSubscription {
        private final AtomicBoolean isDisposed = new AtomicBoolean();
        private final AmqpSession session;
        private final Disposable subscription;

        private SessionSubscription(AmqpSession session, Disposable subscription) {
            this.session = session;
            this.subscription = subscription;
        }

        private AmqpSession getSession() {
            return session;
        }

        private void dispose() {
            if (isDisposed.getAndSet(true)) {
                return;
            }

            // ReactorSession supports a graceful async close; other implementations are disposed synchronously.
            if (session instanceof ReactorSession) {
                ((ReactorSession) session).closeAsync("Closing session.", null, true)
                    .subscribe();
            } else {
                session.dispose();
            }

            subscription.dispose();
        }

        private Mono<Void> isClosed() {
            if (session instanceof ReactorSession) {
                return ((ReactorSession) session).isClosed();
            } else {
                return Mono.empty();
            }
        }
    }
}
That makes sense. I will rethink my changes. I could get some thoughts from your changes for ReactorReceiver. Maybe treat differently for explicit close and implicit close.
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } }); final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes."))) .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Emitted connection shutdown signal. 
"))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
logger.atVerbose()
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes."))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat() .takeUntilOther(shutdownSignalSink.asMono()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(() -> !this.isDisposed()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
Sounds good; I'll open that PR and tag you for thoughts. I'm trying to achieve it without adding extra async edges and nodes to the underlying graph (to reduce async signal passing and thread hopping). The more async signaling there is, the harder it becomes to debug and fix in the future.
/**
 * Disposes of the connection with the given shutdown signal.
 * <p>
 * Close ordering: (1) CBS node and management nodes are closed concurrently; (2) the shutdown signal is
 * emitted to {@code shutdownSignalSink}; (3) the proton-j connection teardown ({@code closeConnectionWork})
 * is scheduled on the reactor dispatcher. The returned Mono completes when {@code isClosedMono} completes,
 * i.e. when teardown has finished.
 *
 * @param shutdownSignal Shutdown signal to emit and log while closing.
 * @return A Mono that completes when the connection is fully closed.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Deferred: the signal is emitted at subscription time, after the CBS/management close
    // operations below have completed — not eagerly when this method is invoked.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);
        if (result.isFailure()) {
            // Best-effort: the sink is one-shot, so a second close attempt cannot emit again.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    // cbsChannelProcessor is only set once the CBS node was created; skip close otherwise.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node that was created for an entity path.
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // Teardown of the proton-j connection must run on the reactor dispatcher thread when
    // possible; fall back to running it inline if scheduling fails or there is no dispatcher.
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                // Dispatcher already shut down; run the teardown inline.
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError: close both node groups even if one fails, then emit the signal, then
    // tear down the reactor, and finally wait for closeConnectionWork to complete isClosedMono.
    return Mono.whenDelayError(
            cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Closed CBS node.")),
            managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
logger.atVerbose()
/**
 * Disposes of the connection with the given shutdown signal.
 * <p>
 * Close ordering: (1) CBS node and management nodes are closed concurrently; (2) the shutdown signal is
 * emitted to {@code shutdownSignalSink}; (3) the proton-j connection teardown ({@code closeConnectionWork})
 * is scheduled on the reactor dispatcher. The returned Mono completes when {@code isClosedMono} completes.
 *
 * @param shutdownSignal Shutdown signal to emit and log while closing.
 * @return A Mono that completes when the connection is fully closed.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // FIX: previously the shutdown signal was emitted eagerly in the method body — a side effect
    // at assembly time, before anything subscribed to the returned Mono and before the CBS and
    // management nodes were closed. Wrap the emission in Mono.fromRunnable so it happens at
    // subscription time, sequenced after the dependent nodes have closed.
    final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);
        if (result.isFailure()) {
            // Best-effort: the sink is one-shot, so a second close attempt cannot emit again.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    // cbsChannelProcessor is only set once the CBS node was created; skip close otherwise.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node created for an entity path.
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // Teardown of the proton-j connection must run on the reactor dispatcher thread when
    // possible; fall back to running it inline if scheduling fails or there is no dispatcher.
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                // Dispatcher already shut down; run the teardown inline.
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError: close both node groups even if one errors, then emit the shutdown signal,
    // then tear down the reactor, and finally wait for closeConnectionWork to complete isClosedMono.
    return Mono.whenDelayError(
            cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Closed CBS node.")),
            managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
                .addKeyValue(SIGNAL_TYPE_KEY, signalType)
                .log("Closed management nodes.")))
        .then(emitShutDownSignalOperation)
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
/**
 * An AMQP connection backed by a proton-j {@link Connection} and reactor. Lazily creates the underlying
 * connection, AMQP sessions, the claims-based-security (CBS) node used for authorization, and per-entity
 * management nodes, and tears all of them down on dispose.
 * <p>
 * Thread-safety: mutable singletons ({@code connection}, {@code executor}, {@code cbsChannel}) are created
 * inside {@code synchronized} methods; collections are concurrent; disposal is guarded by an AtomicBoolean.
 */
class ReactorConnection implements AmqpConnection {
    // Well-known session/link names and addresses for the CBS (authorization) node.
    private static final String CBS_SESSION_NAME = "cbs-session";
    private static final String CBS_ADDRESS = "$cbs";
    private static final String CBS_LINK_NAME = "cbs";

    // Suffixes used to build per-entity management node session/link names and addresses.
    private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
    private static final String MANAGEMENT_ADDRESS = "$management";
    private static final String MANAGEMENT_LINK_NAME = "mgmt";

    private final ClientLogger logger;

    // Sessions created on this connection, keyed by session name. Each value owns the session plus
    // the subscription watching the session's endpoint states.
    private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>();
    // Management nodes, keyed by entity path.
    private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>();

    // Flips to true exactly once when disposal begins; guards against double-dispose.
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // One-shot sink broadcasting the shutdown signal to all downstream consumers.
    private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one();
    private final Flux<AmqpEndpointState> endpointStates;
    // Completes when closeConnectionWork() has finished tearing everything down.
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();

    private final String connectionId;
    // Lazily creates and caches the proton-j connection, then waits for it to become ACTIVE.
    private final Mono<Connection> connectionMono;
    private final ConnectionHandler handler;
    private final ReactorHandlerProvider handlerProvider;
    private final TokenManagerProvider tokenManagerProvider;
    private final MessageSerializer messageSerializer;
    private final ConnectionOptions connectionOptions;
    private final ReactorProvider reactorProvider;
    private final AmqpRetryPolicy retryPolicy;
    private final SenderSettleMode senderSettleMode;
    private final ReceiverSettleMode receiverSettleMode;
    private final Duration operationTimeout;
    private final Composite subscriptions;

    // Runs the proton-j reactor; created with the connection, closed during teardown.
    private ReactorExecutor executor;

    private volatile ClaimsBasedSecurityChannel cbsChannel;
    private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor;
    private volatile Connection connection;

    /**
     * Creates a new AMQP connection that uses proton-j.
     *
     * @param connectionId Identifier for the connection.
     * @param connectionOptions A set of options used to create the AMQP connection.
     * @param reactorProvider Provides proton-j Reactor instances.
     * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
     * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node.
     * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}.
     * @param senderSettleMode to set as {@link SenderSettleMode} on sender.
     * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
     */
    public ReactorConnection(String connectionId, ConnectionOptions connectionOptions,
        ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider,
        TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer,
        SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) {

        this.connectionOptions = connectionOptions;
        this.reactorProvider = reactorProvider;
        this.connectionId = connectionId;
        this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId));
        this.handlerProvider = handlerProvider;
        this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider,
            "'tokenManagerProvider' cannot be null.");
        this.messageSerializer = messageSerializer;
        this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions);

        this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry());
        this.operationTimeout = connectionOptions.getRetry().getTryTimeout();
        this.senderSettleMode = senderSettleMode;
        this.receiverSettleMode = receiverSettleMode;

        // Lazily create the connection on first subscription, then only emit it once the endpoint
        // reports ACTIVE (bounded by the retry try-timeout).
        this.connectionMono = Mono.fromCallable(this::getOrCreateConnection)
            .flatMap(reactorConnection -> {
                final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates()
                    .filter(state -> state == AmqpEndpointState.ACTIVE)
                    .next()
                    .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format(
                        "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId,
                        operationTimeout), handler.getErrorContext())));
                return activeEndpoint.thenReturn(reactorConnection);
            })
            .doOnError(error -> {
                // getAndSet guarantees only one code path performs the close.
                if (isDisposed.getAndSet(true)) {
                    logger.verbose("Connection was already disposed: Error occurred while connection was starting.",
                        error);
                } else {
                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Error occurred while connection was starting. Error: " + error)).subscribe();
                }
            });

        // Mirror the handler's endpoint states, cut off by the shutdown signal, and start cleanup
        // when the stream errors or completes. cache(1) replays the latest state to late subscribers.
        this.endpointStates = this.handler.getEndpointStates()
            .takeUntilOther(shutdownSignalSink.asMono())
            .map(state -> {
                logger.verbose("State {}", state);
                return AmqpEndpointStateUtil.getConnectionState(state);
            })
            .onErrorResume(error -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to error.");
                    return closeAsync(new AmqpShutdownSignal(false, false,
                        error.getMessage())).then(Mono.error(error));
                } else {
                    return Mono.error(error);
                }
            })
            .doOnComplete(() -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to connection close.");
                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Connection handler closed.")).subscribe();
                }
            })
            .cache(1);

        // Eagerly subscribe so state transitions (and the cleanup hooks above) are observed even
        // before any external subscriber exists.
        this.subscriptions = Disposables.composite(this.endpointStates.subscribe());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Flux<AmqpEndpointState> getEndpointStates() {
        return endpointStates;
    }

    /**
     * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is
     * closed.
     *
     * @return Shutdown signals associated with this connection. It emits a signal when the underlying
     *     connection is closed.
     */
    @Override
    public Flux<AmqpShutdownSignal> getShutdownSignals() {
        return shutdownSignalSink.asMono().cache().flux();
    }

    /**
     * Gets (creating on first use) the management node for the given entity path. Authorizes with the CBS
     * node before the node is handed out.
     *
     * @param entityPath Entity the management node is scoped to.
     * @return A Mono emitting the management node, or erroring if the connection is disposed.
     */
    @Override
    public Mono<AmqpManagementNode> getManagementNode(String entityPath) {
        return Mono.defer(() -> {
            if (isDisposed()) {
                return Mono.error(logger.atError()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .log(Exceptions.propagate(new IllegalStateException(
                        "Connection is disposed. Cannot get management instance."))));
            }

            final AmqpManagementNode existing = managementNodes.get(entityPath);
            if (existing != null) {
                return Mono.just(existing);
            }

            final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(),
                connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope())
                .getTokenManager(getClaimsBasedSecurityNode(), entityPath);

            // compute() re-checks under the map's lock: a concurrent caller may have created the
            // node between the get() above and here, in which case the extra token manager is closed.
            return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> {
                if (current != null) {
                    logger.info("A management node exists already, returning it.");

                    // Close the token manager we had created during this transaction.
                    tokenManager.close();
                    return current;
                }

                final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
                final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
                final String address = entityPath + "/" + MANAGEMENT_ADDRESS;

                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .addKeyValue("address", address)
                    .log("Creating management node.");

                final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel =
                    createRequestResponseChannel(sessionName, linkName, address);
                return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath,
                    tokenManager);
            }));
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() {
        // The CBS node is only usable once the connection is active.
        return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode()));
    }

    @Override
    public String getId() {
        return connectionId;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getFullyQualifiedNamespace() {
        return handler.getHostname();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getMaxFrameSize() {
        return handler.getMaxFrameSize();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Map<String, Object> getConnectionProperties() {
        return handler.getConnectionProperties();
    }

    /**
     * Gets (creating on first use) the session with the given name, then waits for it to become ACTIVE
     * within the retry try-timeout before emitting it.
     */
    @Override
    public Mono<AmqpSession> createSession(String sessionName) {
        return connectionMono.map(connection -> {
            // computeIfAbsent makes session creation atomic per session name.
            final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> {
                final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId,
                    getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout());
                final Session session = connection.session();

                BaseHandler.setHandler(session, sessionHandler);
                final AmqpSession amqpSession = createSession(key, session, sessionHandler);

                // Remove the session from the map when its endpoint-state stream errors or completes
                // (unless the whole connection is already being disposed).
                final Disposable subscription = amqpSession.getEndpointStates()
                    .subscribe(state -> {
                    }, error -> {
                        if (isDisposed.get()) {
                            return;
                        }

                        logger.atInfo()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Error occurred. Removing and disposing session", error);
                        removeSession(key);
                    }, () -> {
                        if (isDisposed.get()) {
                            return;
                        }

                        logger.atVerbose()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Complete. Removing and disposing session.");
                        removeSession(key);
                    });

                return new SessionSubscription(amqpSession, subscription);
            });

            return sessionSubscription;
        }).flatMap(sessionSubscription -> {
            final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates()
                .filter(state -> state == AmqpEndpointState.ACTIVE)
                .next()
                .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true,
                    String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.",
                        connectionId, sessionName), handler.getErrorContext())));

            return activeSession.thenReturn(sessionSubscription.getSession());
        });
    }

    /**
     * Creates a new AMQP session with the given parameters.
     *
     * @param sessionName Name of the AMQP session.
     * @param session The reactor session associated with this session.
     * @param handler Session handler for the reactor session.
     *
     * @return A new instance of AMQP session.
     */
    protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
        return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider,
            getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean removeSession(String sessionName) {
        if (sessionName == null) {
            return false;
        }

        final SessionSubscription removed = sessionMap.remove(sessionName);
        if (removed != null) {
            removed.dispose();
        }

        return removed != null;
    }

    @Override
    public boolean isDisposed() {
        return isDisposed.get();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void dispose() {
        // Block for twice the operation timeout to allow both session close and reactor teardown.
        final Duration timeout = operationTimeout.plus(operationTimeout);
        closeAsync().block(timeout);
    }

    /**
     * Gets the active AMQP connection for this instance.
     *
     * @return The AMQP connection.
     *
     * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the
     *     given {@link AmqpRetryOptions#getTryTimeout() timeout}.
     */
    protected Mono<Connection> getReactorConnection() {
        return connectionMono;
    }

    /**
     * Creates a bidirectional link between the message broker and the client.
     *
     * @param sessionName Name of the session.
     * @param linkName Name of the link.
     * @param entityPath Address to the message broker.
     *
     * @return A new {@link RequestResponseChannel} to communicate with the message broker.
     */
    protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName,
        String linkName, String entityPath) {
        Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");

        // repeat() recreates the channel each time the previous one terminates; the stream as a
        // whole is cut off once the connection's shutdown signal fires.
        final Flux<RequestResponseChannel> createChannel = createSession(sessionName)
            .cast(ReactorSession.class)
            .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName,
                entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
                messageSerializer, senderSettleMode, receiverSettleMode))
            .doOnNext(e -> {
                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .log("Emitting new response channel.");
            })
            .repeat()
            .takeUntilOther(shutdownSignalSink.asMono());

        Map<String, Object> loggingContext = createContextWithConnectionId(connectionId);
        loggingContext.put(ENTITY_PATH_KEY, entityPath);

        return createChannel
            .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(),
                channel -> channel.getEndpointStates(), retryPolicy, loggingContext));
    }

    @Override
    public Mono<Void> closeAsync() {
        // Idempotent: later callers just wait on the close-completion sink.
        if (isDisposed.getAndSet(true)) {
            logger.verbose("Connection was already closed. Not disposing again.");
            return isClosedMono.asMono();
        }

        return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client."));
    }

    // Synchronous teardown of the proton-j connection, intended to run on the reactor dispatcher
    // thread. Completes isClosedMono once sessions and the executor have closed.
    private synchronized void closeConnectionWork() {
        if (connection == null) {
            // Never connected: nothing to close, just complete the sink.
            isClosedMono.emitEmpty((signalType, emitResult) -> {
                addSignalTypeAndResult(logger.atInfo(), signalType, emitResult)
                    .log("Unable to complete closeMono.");
                return false;
            });

            return;
        }

        connection.close();
        handler.close();

        final ArrayList<Mono<Void>> closingSessions = new ArrayList<>();
        sessionMap.values().forEach(link -> closingSessions.add(link.isClosed()));

        // Defer so the executor (possibly reassigned) is read under the lock at close time.
        final Mono<Void> closedExecutor = executor != null ? Mono.defer(() -> {
            synchronized (this) {
                logger.info("Closing executor.");
                return executor.closeAsync();
            }
        }) : Mono.empty();

        // Wait (bounded) for all sessions to close, then the executor, then signal completion.
        final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions)
            .timeout(operationTimeout)
            .onErrorResume(error -> {
                logger.info("Timed out waiting for all sessions to close.");
                return Mono.empty();
            })
            .then(closedExecutor)
            .then(Mono.fromRunnable(() -> {
                isClosedMono.emitEmpty((signalType, result) -> {
                    addSignalTypeAndResult(logger.atWarning(), signalType, result)
                        .log("Unable to emit connection closed signal.");
                    return false;
                });

                subscriptions.dispose();
            }));

        subscriptions.add(closeSessionAndExecutorMono.subscribe());
    }

    // Lazily creates the CBS channel used for authorization; synchronized so only one is created.
    private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
        if (cbsChannel == null) {
            logger.info("Setting CBS channel.");
            cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS);
            cbsChannel = new ClaimsBasedSecurityChannel(
                cbsChannelProcessor,
                connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(),
                connectionOptions.getRetry());
        }

        return cbsChannel;
    }

    // Lazily creates the proton-j connection, its reactor executor, and wires the reactor
    // dispatcher's shutdown signal into cleanup. Synchronized: only one connection is created.
    private synchronized Connection getOrCreateConnection() throws IOException {
        if (connection == null) {
            logger.atInfo()
                .addKeyValue(HOSTNAME_KEY, handler.getHostname())
                .addKeyValue("port", handler.getProtocolPort())
                .log("Creating and starting connection.");

            final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
            connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);

            final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler();

            // Cap the executor's pending-task wait at the smaller of the server-busy wait and half
            // the try-timeout.
            final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2);
            final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0
                ? ClientConstants.SERVER_BUSY_WAIT_TIME
                : timeoutDivided;
            final Scheduler scheduler = Schedulers.newSingle("reactor-executor");
            executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler,
                pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace());

            // Deferred + synchronized so the executor is closed safely whenever the dispatcher
            // shuts down or errors.
            final Mono<Void> executorCloseMono = Mono.defer(() -> {
                synchronized (this) {
                    return executor.closeAsync();
                }
            });
            reactorProvider.getReactorDispatcher().getShutdownSignal()
                .flatMap(signal -> {
                    reactorExceptionHandler.onConnectionShutdown(signal);
                    return executorCloseMono;
                })
                .onErrorResume(error -> {
                    reactorExceptionHandler.onConnectionError(error);
                    return executorCloseMono;
                })
                .subscribe();

            executor.start();
        }

        return connection;
    }

    // Bridges reactor-level errors/shutdowns into connection disposal.
    private final class ReactorExceptionHandler extends AmqpExceptionHandler {
        private ReactorExceptionHandler() {
            super();
        }

        @Override
        public void onConnectionError(Throwable exception) {
            logger.atInfo()
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionError, Starting new reactor", exception);

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onReactorError: Disposing.");

                closeAsync(new AmqpShutdownSignal(false, false,
                    "onReactorError: " + exception.toString()))
                    .subscribe();
            }
        }

        @Override
        void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionShutdown. Shutting down.");

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onConnectionShutdown: disposing.");

                closeAsync(shutdownSignal).subscribe();
            }
        }
    }

    // Pairs a session with the subscription watching its endpoint states, so both can be
    // disposed together exactly once.
    private static final class SessionSubscription {
        private final AtomicBoolean isDisposed = new AtomicBoolean();
        private final AmqpSession session;
        private final Disposable subscription;

        private SessionSubscription(AmqpSession session, Disposable subscription) {
            this.session = session;
            this.subscription = subscription;
        }

        private AmqpSession getSession() {
            return session;
        }

        private void dispose() {
            if (isDisposed.getAndSet(true)) {
                return;
            }

            // ReactorSession supports graceful async close; other implementations fall back to
            // synchronous dispose.
            if (session instanceof ReactorSession) {
                ((ReactorSession) session).closeAsync("Closing session.", null, true)
                    .subscribe();
            } else {
                session.dispose();
            }

            subscription.dispose();
        }

        private Mono<Void> isClosed() {
            if (session instanceof ReactorSession) {
                return ((ReactorSession) session).isClosed();
            } else {
                return Mono.empty();
            }
        }
    }
}
/**
 * An {@link AmqpConnection} implementation backed by a proton-j {@link Connection} driven by a
 * {@link Reactor}. Manages the connection lifecycle (lazy creation, endpoint-state publishing,
 * shutdown signalling), AMQP sessions, the claims-based-security (CBS) channel used for
 * authorization, and per-entity management nodes.
 */
class ReactorConnection implements AmqpConnection {
    // Well-known names/addresses for the claims-based-security ($cbs) channel.
    private static final String CBS_SESSION_NAME = "cbs-session";
    private static final String CBS_ADDRESS = "$cbs";
    private static final String CBS_LINK_NAME = "cbs";
    // Suffixes used to build per-entity management ($management) session/link/address names.
    private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
    private static final String MANAGEMENT_ADDRESS = "$management";
    private static final String MANAGEMENT_LINK_NAME = "mgmt";

    private final ClientLogger logger;
    // Sessions keyed by session name; each entry owns the session plus its endpoint-state subscription.
    private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>();
    // Management nodes keyed by entity path; created lazily in getManagementNode.
    private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>();
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Emits exactly one shutdown signal when the connection is being torn down.
    private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one();
    private final Flux<AmqpEndpointState> endpointStates;
    // Completes when closeConnectionWork has finished tearing everything down.
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();

    private final String connectionId;
    private final Mono<Connection> connectionMono;
    private final ConnectionHandler handler;
    private final ReactorHandlerProvider handlerProvider;
    private final TokenManagerProvider tokenManagerProvider;
    private final MessageSerializer messageSerializer;
    private final ConnectionOptions connectionOptions;
    private final ReactorProvider reactorProvider;
    private final AmqpRetryPolicy retryPolicy;
    private final SenderSettleMode senderSettleMode;
    private final ReceiverSettleMode receiverSettleMode;
    private final Duration operationTimeout;
    private final Composite subscriptions;

    // Mutable/volatile state guarded by `synchronized (this)` in getOrCreateConnection and close paths.
    private ReactorExecutor executor;
    private volatile ClaimsBasedSecurityChannel cbsChannel;
    private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor;
    private volatile Connection connection;

    /**
     * Creates a new AMQP connection that uses proton-j.
     *
     * @param connectionId Identifier for the connection.
     * @param connectionOptions A set of options used to create the AMQP connection.
     * @param reactorProvider Provides proton-j Reactor instances.
     * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
     * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node.
     * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}.
     * @param senderSettleMode to set as {@link SenderSettleMode} on sender.
     * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
     */
    public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider,
        ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider,
        MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
        ReceiverSettleMode receiverSettleMode) {
        this.connectionOptions = connectionOptions;
        this.reactorProvider = reactorProvider;
        this.connectionId = connectionId;
        this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId));
        this.handlerProvider = handlerProvider;
        this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider,
            "'tokenManagerProvider' cannot be null.");
        this.messageSerializer = messageSerializer;
        this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions);

        this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry());
        this.operationTimeout = connectionOptions.getRetry().getTryTimeout();
        this.senderSettleMode = senderSettleMode;
        this.receiverSettleMode = receiverSettleMode;

        // Lazily creates the underlying proton-j connection, then waits (bounded by the retry
        // try-timeout) for the connection's endpoint state to reach ACTIVE before emitting it.
        this.connectionMono = Mono.fromCallable(this::getOrCreateConnection)
            .flatMap(reactorConnection -> {
                final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates()
                    .filter(state -> state == AmqpEndpointState.ACTIVE)
                    .next()
                    .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format(
                        "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId,
                        operationTimeout), handler.getErrorContext())));
                return activeEndpoint.thenReturn(reactorConnection);
            })
            .doOnError(error -> {
                // A failure during startup disposes of this connection (unless already disposed).
                if (isDisposed.getAndSet(true)) {
                    logger.verbose("Connection was already disposed: Error occurred while connection was starting.",
                        error);
                } else {
                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Error occurred while connection was starting. Error: " + error)).subscribe();
                }
            });

        // Publishes the connection's endpoint states until the shutdown signal fires; an error or a
        // graceful completion from the handler triggers disposal of this connection exactly once.
        this.endpointStates = this.handler.getEndpointStates()
            .takeUntilOther(shutdownSignalSink.asMono())
            .map(state -> {
                logger.verbose("State {}", state);
                return AmqpEndpointStateUtil.getConnectionState(state);
            })
            .onErrorResume(error -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to error.");
                    return closeAsync(new AmqpShutdownSignal(false, false,
                        error.getMessage())).then(Mono.error(error));
                } else {
                    return Mono.error(error);
                }
            })
            .doOnComplete(() -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to connection close.");
                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Connection handler closed.")).subscribe();
                }
            })
            .cache(1); // Replay the latest state to late subscribers.

        this.subscriptions = Disposables.composite(this.endpointStates.subscribe());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Flux<AmqpEndpointState> getEndpointStates() {
        return endpointStates;
    }

    /**
     * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed.
     *
     * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is
     *     closed.
     */
    @Override
    public Flux<AmqpShutdownSignal> getShutdownSignals() {
        return shutdownSignalSink.asMono().cache().flux();
    }

    @Override
    public Mono<AmqpManagementNode> getManagementNode(String entityPath) {
        return Mono.defer(() -> {
            if (isDisposed()) {
                return Mono.error(logger.atError()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .log(Exceptions.propagate(new IllegalStateException(
                        "Connection is disposed. Cannot get management instance."))));
            }

            final AmqpManagementNode existing = managementNodes.get(entityPath);
            if (existing != null) {
                return Mono.just(existing);
            }

            // Authorize against the CBS node first, then atomically create (or reuse) the node.
            final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(),
                connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope())
                .getTokenManager(getClaimsBasedSecurityNode(), entityPath);

            return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> {
                if (current != null) {
                    // Another caller created the node while we were authorizing; discard our token manager.
                    logger.info("A management node exists already, returning it.");
                    tokenManager.close();
                    return current;
                }

                final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
                final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
                final String address = entityPath + "/" + MANAGEMENT_ADDRESS;

                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .addKeyValue("address", address)
                    .log("Creating management node.");

                final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel =
                    createRequestResponseChannel(sessionName, linkName, address);
                return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath,
                    tokenManager);
            }));
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() {
        // Wait for the connection to be active before exposing the CBS node.
        return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode()));
    }

    @Override
    public String getId() {
        return connectionId;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getFullyQualifiedNamespace() {
        return handler.getHostname();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getMaxFrameSize() {
        return handler.getMaxFrameSize();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Map<String, Object> getConnectionProperties() {
        return handler.getConnectionProperties();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<AmqpSession> createSession(String sessionName) {
        return connectionMono.map(connection -> {
            // Create the session once per name; the subscription removes it from the map when its
            // endpoint-state stream errors or completes (unless the whole connection is disposing).
            final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> {
                final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId,
                    getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout());
                final Session session = connection.session();

                BaseHandler.setHandler(session, sessionHandler);
                final AmqpSession amqpSession = createSession(key, session, sessionHandler);
                final Disposable subscription = amqpSession.getEndpointStates()
                    .subscribe(state -> {
                    }, error -> {
                        // If we were already disposed of the connection, the session would be removed.
                        if (isDisposed.get()) {
                            return;
                        }
                        logger.atInfo()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Error occurred. Removing and disposing session", error);
                        removeSession(key);
                    }, () -> {
                        // If we were already disposed of the connection, the session would be removed.
                        if (isDisposed.get()) {
                            return;
                        }
                        logger.atVerbose()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Complete. Removing and disposing session.");
                        removeSession(key);
                    });

                return new SessionSubscription(amqpSession, subscription);
            });

            return sessionSubscription;
        }).flatMap(sessionSubscription -> {
            // Wait (bounded by the retry try-timeout) for the session to become ACTIVE before emitting it.
            final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates()
                .filter(state -> state == AmqpEndpointState.ACTIVE)
                .next()
                .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true,
                    String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.",
                        connectionId, sessionName), handler.getErrorContext())));

            return activeSession.thenReturn(sessionSubscription.getSession());
        });
    }

    /**
     * Creates a new AMQP session with the given parameters.
     *
     * @param sessionName Name of the AMQP session.
     * @param session The reactor session associated with this session.
     * @param handler Session handler for the reactor session.
     *
     * @return A new instance of AMQP session.
     */
    protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
        return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider,
            getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean removeSession(String sessionName) {
        if (sessionName == null) {
            return false;
        }

        final SessionSubscription removed = sessionMap.remove(sessionName);
        if (removed != null) {
            removed.dispose();
        }

        return removed != null;
    }

    @Override
    public boolean isDisposed() {
        return isDisposed.get();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void dispose() {
        // Allow twice the operation timeout to cover closing both the children and the connection itself.
        final Duration timeout = operationTimeout.plus(operationTimeout);
        closeAsync().block(timeout);
    }

    /**
     * Gets the active AMQP connection for this instance.
     *
     * @return The AMQP connection.
     *
     * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given
     *     {@link AmqpRetryOptions#getTryTimeout() tryTimeout}.
     */
    protected Mono<Connection> getReactorConnection() {
        return connectionMono;
    }

    /**
     * Creates a bidirectional link between the message broker and the client.
     *
     * @param sessionName Name of the session.
     * @param linkName Name of the link.
     * @param entityPath Address to the message broker.
     *
     * @return A new {@link RequestResponseChannel} to communicate with the message broker.
     */
    protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName,
        String linkName, String entityPath) {
        Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");

        // Re-creates the channel (via repeat) whenever the previous one terminates, until this
        // connection is disposed.
        final Flux<RequestResponseChannel> createChannel = createSession(sessionName)
            .cast(ReactorSession.class)
            .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName,
                entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
                messageSerializer, senderSettleMode, receiverSettleMode))
            .doOnNext(e -> {
                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .log("Emitting new response channel.");
            })
            .repeat(() -> !this.isDisposed());

        Map<String, Object> loggingContext = createContextWithConnectionId(connectionId);
        loggingContext.put(ENTITY_PATH_KEY, entityPath);

        return createChannel
            .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(),
                channel -> channel.getEndpointStates(), retryPolicy, loggingContext));
    }

    @Override
    public Mono<Void> closeAsync() {
        if (isDisposed.getAndSet(true)) {
            logger.verbose("Connection was already closed. Not disposing again.");
            return isClosedMono.asMono();
        }

        return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client."));
    }

    // Closes the proton-j connection and handler, then asynchronously waits for all sessions to
    // close and the executor to stop before completing isClosedMono. Intended to run on the
    // reactor dispatcher thread (see closeAsync(AmqpShutdownSignal)).
    private synchronized void closeConnectionWork() {
        if (connection == null) {
            isClosedMono.emitEmpty((signalType, emitResult) -> {
                addSignalTypeAndResult(logger.atInfo(), signalType, emitResult)
                    .log("Unable to complete closeMono.");
                return false;
            });
            return;
        }

        connection.close();
        handler.close();

        final ArrayList<Mono<Void>> closingSessions = new ArrayList<>();
        sessionMap.values().forEach(link -> closingSessions.add(link.isClosed()));

        // Close the executor (if one was created) after all sessions are done, re-synchronizing on
        // `this` because `executor` is assigned under the same lock in getOrCreateConnection.
        final Mono<Void> closedExecutor = executor != null ? Mono.defer(() -> {
            synchronized (this) {
                logger.info("Closing executor.");
                return executor.closeAsync();
            }
        }) : Mono.empty();

        // Waits for the sessions to close (bounded by operationTimeout), then closes the executor
        // and finally signals isClosedMono and disposes of remaining subscriptions.
        final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions)
            .timeout(operationTimeout)
            .onErrorResume(error -> {
                logger.info("Timed out waiting for all sessions to close.");
                return Mono.empty();
            })
            .then(closedExecutor)
            .then(Mono.fromRunnable(() -> {
                isClosedMono.emitEmpty((signalType, result) -> {
                    addSignalTypeAndResult(logger.atWarning(), signalType, result)
                        .log("Unable to emit connection closed signal.");
                    return false;
                });

                subscriptions.dispose();
            }));

        subscriptions.add(closeSessionAndExecutorMono.subscribe());
    }

    // Lazily creates the claims-based-security channel used to authorize links on this connection.
    private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
        if (cbsChannel == null) {
            logger.info("Setting CBS channel.");
            cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS);
            cbsChannel = new ClaimsBasedSecurityChannel(
                cbsChannelProcessor,
                connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(),
                connectionOptions.getRetry());
        }

        return cbsChannel;
    }

    // Lazily creates the proton-j connection, wires the reactor executor to the dispatcher's
    // shutdown signal, and starts pumping the reactor.
    private synchronized Connection getOrCreateConnection() throws IOException {
        if (connection == null) {
            logger.atInfo()
                .addKeyValue(HOSTNAME_KEY, handler.getHostname())
                .addKeyValue("port", handler.getProtocolPort())
                .log("Creating and starting connection.");

            final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
            connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);

            final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler();

            // Use the shorter of the server-busy wait time and half the try-timeout for pending tasks.
            final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2);
            final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0
                ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided;
            final Scheduler scheduler = Schedulers.newSingle("reactor-executor");
            executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler,
                pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace());

            // Relay the dispatcher's shutdown signal (or error) to the exception handler, then close
            // the executor. `executor` is re-read under the lock because it is reassigned above.
            final Mono<Void> executorCloseMono = Mono.defer(() -> {
                synchronized (this) {
                    return executor.closeAsync();
                }
            });
            reactorProvider.getReactorDispatcher().getShutdownSignal()
                .flatMap(signal -> {
                    reactorExceptionHandler.onConnectionShutdown(signal);
                    return executorCloseMono;
                })
                .onErrorResume(error -> {
                    reactorExceptionHandler.onConnectionError(error);
                    return executorCloseMono;
                })
                .subscribe();

            executor.start();
        }

        return connection;
    }

    // Receives reactor-level errors and shutdown signals and disposes of this connection exactly once.
    private final class ReactorExceptionHandler extends AmqpExceptionHandler {
        private ReactorExceptionHandler() {
            super();
        }

        @Override
        public void onConnectionError(Throwable exception) {
            logger.atInfo()
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionError, Starting new reactor", exception);

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onReactorError: Disposing.");

                closeAsync(new AmqpShutdownSignal(false, false,
                    "onReactorError: " + exception.toString()))
                    .subscribe();
            }
        }

        @Override
        void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionShutdown. Shutting down.");

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onConnectionShutdown: disposing.");

                closeAsync(shutdownSignal).subscribe();
            }
        }
    }

    // Pairs an AmqpSession with the disposable subscription watching its endpoint states, so both
    // can be torn down together when the session is removed.
    private static final class SessionSubscription {
        private final AtomicBoolean isDisposed = new AtomicBoolean();
        private final AmqpSession session;
        private final Disposable subscription;

        private SessionSubscription(AmqpSession session, Disposable subscription) {
            this.session = session;
            this.subscription = subscription;
        }

        private AmqpSession getSession() {
            return session;
        }

        private void dispose() {
            if (isDisposed.getAndSet(true)) {
                return;
            }

            if (session instanceof ReactorSession) {
                // ReactorSession supports a graceful async close; plain AmqpSession does not.
                ((ReactorSession) session).closeAsync("Closing session.", null, true)
                    .subscribe();
            } else {
                session.dispose();
            }

            subscription.dispose();
        }

        private Mono<Void> isClosed() {
            if (session instanceof ReactorSession) {
                return ((ReactorSession) session).isClosed();
            } else {
                return Mono.empty();
            }
        }
    }
}
I moved `emitShutdownSignalOperation` into the `Mono.whenDelayError` chain, so ReactorReceiver(s) do not need to wait for the CBS node to be completely closed. I have run several explicit test cases that work fine, and I am trying to reproduce some recovery cases. >The only route where ReactorSession sends CLOSE without waiting for the ReactorReceiver(s) DETACH is when the Connection sends the shutdown signal to the ReactorSession. I reconsidered this scenario: because the closing of ReactorReceiver(s) does not prevent the connection from closing (unlike the closing of a CBS node, which does prevent the connection from closing), ReactorReceiver(s) can also be closed after the connection is closed. So I think a no-wait close for ReactorReceiver is nice-to-have, as it can close the receiver earlier. I have also done some tests — for example, explicitly closing an EH consumer/processor, or throwing an exception to close the Connection — and all of these scenarios close the ReactorReceiver(s) first and then close the Connection. From both the logic analysis and the tests, the timeout issue that occurs in the CBS node may not occur in ReactorReceiver(s). I'll keep checking the connection-closing logic.
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } }); final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes."))) .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Emitted connection shutdown signal. 
"))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
logger.atVerbose()
/**
 * Disposes of this connection. The shutdown signal is emitted synchronously up front; then the CBS
 * channel and all management nodes are closed in parallel (delaying errors), and finally the
 * proton-j connection teardown is scheduled on the reactor dispatcher.
 *
 * @param shutdownSignal the signal describing why the connection is closing; emitted to
 *     {@code shutdownSignalSink} subscribers immediately, before any close work runs.
 * @return a {@link Mono} that completes when the connection has fully closed
 *     (i.e. when {@code isClosedMono} completes).
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");
    final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

    if (result.isFailure()) {
        // A failed emission usually means a signal was already emitted, which is benign here.
        addShutdownSignal(logger.atInfo(), shutdownSignal)
            .addKeyValue(EMIT_RESULT_KEY, result)
            .log("Unable to emit shutdown signal.");
    }

    // Nothing to close when the CBS channel was never created.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // Teardown of the proton-j connection must happen on the reactor thread; fall back to running
    // it inline when no dispatcher is available or scheduling fails.
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // Close CBS and management nodes in parallel (delaying errors so both get a chance to close),
    // then schedule the reactor teardown, and complete when isClosedMono does.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/
    protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName,
        String linkName, String entityPath) {
        Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");

        // Each time the channel processor requests a replacement channel (for example after the current one
        // closes), a new RequestResponseChannel is created on the session. FIX: the previous unconditional
        // repeat() combined with takeUntilOther(shutdownSignalSink.asMono()) raced with shutdown — a brand-new
        // channel could be created (and "Emitting new response channel." logged) while the connection was being
        // disposed. Gating the resubscription on !isDisposed() stops the repeat as soon as disposal begins.
        final Flux<RequestResponseChannel> createChannel = createSession(sessionName)
            .cast(ReactorSession.class)
            .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName,
                entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
                messageSerializer, senderSettleMode, receiverSettleMode))
            .doOnNext(e -> {
                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .log("Emitting new response channel.");
            })
            .repeat(() -> !this.isDisposed());

        Map<String, Object> loggingContext = createContextWithConnectionId(connectionId);
        loggingContext.put(ENTITY_PATH_KEY, entityPath);

        return createChannel
            .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(),
                channel -> channel.getEndpointStates(), retryPolicy, loggingContext));
    }

    /**
     * Begins disposal of this connection with a client-initiated shutdown signal. Idempotent: a second call only
     * returns the mono that completes when close has finished.
     */
    @Override
    public Mono<Void> closeAsync() {
        if (isDisposed.getAndSet(true)) {
            logger.verbose("Connection was already closed. Not disposing again.");
            return isClosedMono.asMono();
        }

        return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client."));
    }

    /**
     * Releases the underlying proton-j connection and handler, waits (bounded by the operation timeout) for all
     * sessions to close, closes the executor, and finally completes {@code isClosedMono}.
     */
    private synchronized void closeConnectionWork() {
        if (connection == null) {
            // Nothing was ever created; just complete the closed signal.
            isClosedMono.emitEmpty((signalType, emitResult) -> {
                addSignalTypeAndResult(logger.atInfo(), signalType, emitResult)
                    .log("Unable to complete closeMono.");
                return false;
            });
            return;
        }

        connection.close();
        handler.close();

        final ArrayList<Mono<Void>> closingSessions = new ArrayList<>();
        sessionMap.values().forEach(link -> closingSessions.add(link.isClosed()));

        // Executor is closed under the monitor because it is also mutated in getOrCreateConnection().
        final Mono<Void> closedExecutor = executor != null ? Mono.defer(() -> {
            synchronized (this) {
                logger.info("Closing executor.");
                return executor.closeAsync();
            }
        }) : Mono.empty();

        // Wait for sessions (best effort, bounded by operationTimeout), then the executor, then emit closed.
        final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions)
            .timeout(operationTimeout)
            .onErrorResume(error -> {
                logger.info("Timed out waiting for all sessions to close.");
                return Mono.empty();
            })
            .then(closedExecutor)
            .then(Mono.fromRunnable(() -> {
                isClosedMono.emitEmpty((signalType, result) -> {
                    addSignalTypeAndResult(logger.atWarning(), signalType, result)
                        .log("Unable to emit connection closed signal.");
                    return false;
                });

                subscriptions.dispose();
            }));

        subscriptions.add(closeSessionAndExecutorMono.subscribe());
    }

    /**
     * Lazily creates the claims-based-security (CBS) channel used to authorize links on this connection.
     */
    private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
        if (cbsChannel == null) {
            logger.info("Setting CBS channel.");
            cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS);
            cbsChannel = new ClaimsBasedSecurityChannel(
                cbsChannelProcessor,
                connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(),
                connectionOptions.getRetry());
        }

        return cbsChannel;
    }

    /**
     * Lazily creates the proton-j {@link Connection} plus the reactor executor that pumps it, and wires the
     * dispatcher's shutdown signal into {@link ReactorExceptionHandler}.
     *
     * @throws IOException if the reactor could not be created.
     */
    private synchronized Connection getOrCreateConnection() throws IOException {
        if (connection == null) {
            logger.atInfo()
                .addKeyValue(HOSTNAME_KEY, handler.getHostname())
                .addKeyValue("port", handler.getProtocolPort())
                .log("Creating and starting connection.");

            final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
            connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);

            final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler();

            // Use the smaller of SERVER_BUSY_WAIT_TIME and half the try-timeout for pending reactor tasks.
            final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2);
            final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0
                ? ClientConstants.SERVER_BUSY_WAIT_TIME
                : timeoutDivided;
            final Scheduler scheduler = Schedulers.newSingle("reactor-executor");
            executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler,
                pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace());

            // Deferred so the executor is read under the monitor at close time.
            final Mono<Void> executorCloseMono = Mono.defer(() -> {
                synchronized (this) {
                    return executor.closeAsync();
                }
            });

            reactorProvider.getReactorDispatcher().getShutdownSignal()
                .flatMap(signal -> {
                    reactorExceptionHandler.onConnectionShutdown(signal);
                    return executorCloseMono;
                })
                .onErrorResume(error -> {
                    reactorExceptionHandler.onConnectionError(error);
                    return executorCloseMono;
                })
                .subscribe();

            executor.start();
        }

        return connection;
    }

    /**
     * Routes reactor-level errors and shutdown notifications into this connection's disposal path.
     */
    private final class ReactorExceptionHandler extends AmqpExceptionHandler {
        private ReactorExceptionHandler() {
            super();
        }

        @Override
        public void onConnectionError(Throwable exception) {
            logger.atInfo()
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionError, Starting new reactor", exception);

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onReactorError: Disposing.");

                closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString()))
                    .subscribe();
            }
        }

        @Override
        void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionShutdown. Shutting down.");

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onConnectionShutdown: disposing.");

                closeAsync(shutdownSignal).subscribe();
            }
        }
    }

    /**
     * Pairs an {@link AmqpSession} with the subscription that watches its endpoint states, so both can be
     * released together.
     */
    private static final class SessionSubscription {
        private final AtomicBoolean isDisposed = new AtomicBoolean();
        private final AmqpSession session;
        private final Disposable subscription;

        private SessionSubscription(AmqpSession session, Disposable subscription) {
            this.session = session;
            this.subscription = subscription;
        }

        private AmqpSession getSession() {
            return session;
        }

        private void dispose() {
            if (isDisposed.getAndSet(true)) {
                return;
            }

            if (session instanceof ReactorSession) {
                ((ReactorSession) session).closeAsync("Closing session.", null, true)
                    .subscribe();
            } else {
                session.dispose();
            }

            subscription.dispose();
        }

        // Completes when the session reports it has closed; non-ReactorSession types have no such signal.
        private Mono<Void> isClosed() {
            if (session instanceof ReactorSession) {
                return ((ReactorSession) session).isClosed();
            } else {
                return Mono.empty();
            }
        }
    }
}
/**
 * An AMQP connection backed by a proton-j {@link Connection} pumped by a reactor executor. Tracks the sessions,
 * the claims-based-security (CBS) channel, and the management nodes created on top of the connection, and
 * coordinates their disposal when the connection shuts down.
 */
class ReactorConnection implements AmqpConnection {
    private static final String CBS_SESSION_NAME = "cbs-session";
    private static final String CBS_ADDRESS = "$cbs";
    private static final String CBS_LINK_NAME = "cbs";

    private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
    private static final String MANAGEMENT_ADDRESS = "$management";
    private static final String MANAGEMENT_LINK_NAME = "mgmt";

    private final ClientLogger logger;

    // Sessions created via createSession(String), keyed by session name; entries are disposed and removed by
    // removeSession(String).
    private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>();

    // Management nodes handed out by getManagementNode(String), keyed by entity path.
    private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>();

    // Set as soon as disposal begins (from any path); guards against double-dispose.
    private final AtomicBoolean isDisposed = new AtomicBoolean();

    // Emits the shutdown signal at most once; also terminates endpointStates via takeUntilOther below.
    private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one();
    private final Flux<AmqpEndpointState> endpointStates;

    // Completed by closeConnectionWork() once the underlying resources are released.
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();

    private final String connectionId;
    private final Mono<Connection> connectionMono;
    private final ConnectionHandler handler;
    private final ReactorHandlerProvider handlerProvider;
    private final TokenManagerProvider tokenManagerProvider;
    private final MessageSerializer messageSerializer;
    private final ConnectionOptions connectionOptions;
    private final ReactorProvider reactorProvider;
    private final AmqpRetryPolicy retryPolicy;
    private final SenderSettleMode senderSettleMode;
    private final ReceiverSettleMode receiverSettleMode;
    private final Duration operationTimeout;
    private final Composite subscriptions;

    private ReactorExecutor executor;

    private volatile ClaimsBasedSecurityChannel cbsChannel;
    private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor;
    private volatile Connection connection;

    /**
     * Creates a new AMQP connection that uses proton-j.
     *
     * @param connectionId Identifier for the connection.
     * @param connectionOptions A set of options used to create the AMQP connection.
     * @param reactorProvider Provides proton-j Reactor instances.
     * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
     * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node.
     * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}.
     * @param senderSettleMode to set as {@link SenderSettleMode} on sender.
     * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
     */
    public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider,
        ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider,
        MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
        ReceiverSettleMode receiverSettleMode) {
        this.connectionOptions = connectionOptions;
        this.reactorProvider = reactorProvider;
        this.connectionId = connectionId;
        this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId));
        this.handlerProvider = handlerProvider;
        this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider,
            "'tokenManagerProvider' cannot be null.");
        this.messageSerializer = messageSerializer;
        this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions);

        this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry());
        this.operationTimeout = connectionOptions.getRetry().getTryTimeout();
        this.senderSettleMode = senderSettleMode;
        this.receiverSettleMode = receiverSettleMode;

        // Lazily creates the proton-j connection, then waits (bounded by the try-timeout) for it to go ACTIVE.
        // Any error during startup triggers disposal unless already disposed.
        this.connectionMono = Mono.fromCallable(this::getOrCreateConnection)
            .flatMap(reactorConnection -> {
                final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates()
                    .filter(state -> state == AmqpEndpointState.ACTIVE)
                    .next()
                    .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format(
                        "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId,
                        operationTimeout), handler.getErrorContext())));
                return activeEndpoint.thenReturn(reactorConnection);
            })
            .doOnError(error -> {
                if (isDisposed.getAndSet(true)) {
                    logger.verbose("Connection was already disposed: Error occurred while connection was starting.",
                        error);
                } else {
                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Error occurred while connection was starting. Error: " + error)).subscribe();
                }
            });

        // Endpoint states from the handler, terminated by the shutdown signal and replayed (last value) to late
        // subscribers. Errors and completion both route into closeAsync exactly once.
        this.endpointStates = this.handler.getEndpointStates()
            .takeUntilOther(shutdownSignalSink.asMono())
            .map(state -> {
                logger.verbose("State {}", state);
                return AmqpEndpointStateUtil.getConnectionState(state);
            })
            .onErrorResume(error -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to error.");
                    return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error));
                } else {
                    return Mono.error(error);
                }
            })
            .doOnComplete(() -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to connection close.");

                    closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe();
                }
            })
            .cache(1);

        this.subscriptions = Disposables.composite(this.endpointStates.subscribe());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Flux<AmqpEndpointState> getEndpointStates() {
        return endpointStates;
    }

    /**
     * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed.
     *
     * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is
     *     closed.
     */
    @Override
    public Flux<AmqpShutdownSignal> getShutdownSignals() {
        return shutdownSignalSink.asMono().cache().flux();
    }

    /**
     * Gets (or lazily creates and caches) the management node for the given entity path. The node is created only
     * after its token manager has successfully authorized with the CBS node.
     */
    @Override
    public Mono<AmqpManagementNode> getManagementNode(String entityPath) {
        return Mono.defer(() -> {
            if (isDisposed()) {
                return Mono.error(logger.atError()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .log(Exceptions.propagate(new IllegalStateException(
                        "Connection is disposed. Cannot get management instance."))));
            }

            final AmqpManagementNode existing = managementNodes.get(entityPath);
            if (existing != null) {
                return Mono.just(existing);
            }

            final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(),
                connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope())
                .getTokenManager(getClaimsBasedSecurityNode(), entityPath);

            return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> {
                if (current != null) {
                    // Another caller created it between the get() above and this compute(); reuse theirs and
                    // release our token manager.
                    logger.info("A management node exists already, returning it.");

                    tokenManager.close();
                    return current;
                }

                final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
                final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
                final String address = entityPath + "/" + MANAGEMENT_ADDRESS;

                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .addKeyValue("address", address)
                    .log("Creating management node.");

                final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel =
                    createRequestResponseChannel(sessionName, linkName, address);
                return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath,
                    tokenManager);
            }));
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() {
        return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode()));
    }

    @Override
    public String getId() {
        return connectionId;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getFullyQualifiedNamespace() {
        return handler.getHostname();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getMaxFrameSize() {
        return handler.getMaxFrameSize();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Map<String, Object> getConnectionProperties() {
        return handler.getConnectionProperties();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<AmqpSession> createSession(String sessionName) {
        return connectionMono.map(connection -> {
            // computeIfAbsent guarantees at most one session per name; the endpoint-state subscription removes
            // the entry when the session errors or completes (unless the whole connection is being disposed).
            final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> {
                final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId,
                    getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout());
                final Session session = connection.session();

                BaseHandler.setHandler(session, sessionHandler);
                final AmqpSession amqpSession = createSession(key, session, sessionHandler);
                final Disposable subscription = amqpSession.getEndpointStates()
                    .subscribe(state -> {
                    }, error -> {
                        if (isDisposed.get()) {
                            return;
                        }

                        logger.atInfo()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Error occurred. Removing and disposing session", error);
                        removeSession(key);
                    }, () -> {
                        if (isDisposed.get()) {
                            return;
                        }

                        logger.atVerbose()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Complete. Removing and disposing session.");
                        removeSession(key);
                    });

                return new SessionSubscription(amqpSession, subscription);
            });

            return sessionSubscription;
        }).flatMap(sessionSubscription -> {
            // Only hand the session back once it reports ACTIVE, bounded by the retry try-timeout.
            final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates()
                .filter(state -> state == AmqpEndpointState.ACTIVE)
                .next()
                .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true,
                    String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.",
                        connectionId, sessionName), handler.getErrorContext())));

            return activeSession.thenReturn(sessionSubscription.getSession());
        });
    }

    /**
     * Creates a new AMQP session with the given parameters.
     *
     * @param sessionName Name of the AMQP session.
     * @param session The reactor session associated with this session.
     * @param handler Session handler for the reactor session.
     *
     * @return A new instance of AMQP session.
     */
    protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
        return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider,
            getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean removeSession(String sessionName) {
        if (sessionName == null) {
            return false;
        }

        final SessionSubscription removed = sessionMap.remove(sessionName);

        if (removed != null) {
            removed.dispose();
        }

        return removed != null;
    }

    @Override
    public boolean isDisposed() {
        return isDisposed.get();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void dispose() {
        // Blocking close; twice the operation timeout to allow both session close and executor close to finish.
        final Duration timeout = operationTimeout.plus(operationTimeout);
        closeAsync().block(timeout);
    }

    /**
     * Gets the active AMQP connection for this instance.
     *
     * @return The AMQP connection.
     *
     * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given
     *     {@link AmqpRetryOptions}.
     */
    protected Mono<Connection> getReactorConnection() {
        return connectionMono;
    }

    /**
     * Creates a bidirectional link between the message broker and the client.
     *
     * @param sessionName Name of the session.
     * @param linkName Name of the link.
     * @param entityPath Address to the message broker.
     *
     * @return A new {@link RequestResponseChannel} to communicate with the message broker.
     */
    protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName,
        String linkName, String entityPath) {
        Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");

        // The processor resubscribes (creating a fresh channel) each time the previous one terminates; the
        // repeat predicate stops that as soon as this connection is disposed.
        final Flux<RequestResponseChannel> createChannel = createSession(sessionName)
            .cast(ReactorSession.class)
            .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName,
                entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
                messageSerializer, senderSettleMode, receiverSettleMode))
            .doOnNext(e -> {
                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .log("Emitting new response channel.");
            })
            .repeat(() -> !this.isDisposed());

        Map<String, Object> loggingContext = createContextWithConnectionId(connectionId);
        loggingContext.put(ENTITY_PATH_KEY, entityPath);

        return createChannel
            .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(),
                channel -> channel.getEndpointStates(), retryPolicy, loggingContext));
    }

    /**
     * Begins disposal with a client-initiated shutdown signal. Idempotent: a second call only returns the mono
     * that completes when close has finished.
     */
    @Override
    public Mono<Void> closeAsync() {
        if (isDisposed.getAndSet(true)) {
            logger.verbose("Connection was already closed. Not disposing again.");
            return isClosedMono.asMono();
        }

        return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client."));
    }

    /**
     * Releases the proton-j connection and handler, waits (bounded by the operation timeout) for sessions to
     * close, closes the executor, and completes {@code isClosedMono}.
     */
    private synchronized void closeConnectionWork() {
        if (connection == null) {
            // Nothing was ever created; just complete the closed signal.
            isClosedMono.emitEmpty((signalType, emitResult) -> {
                addSignalTypeAndResult(logger.atInfo(), signalType, emitResult)
                    .log("Unable to complete closeMono.");
                return false;
            });
            return;
        }

        connection.close();
        handler.close();

        final ArrayList<Mono<Void>> closingSessions = new ArrayList<>();
        sessionMap.values().forEach(link -> closingSessions.add(link.isClosed()));

        // Executor is closed under the monitor because it is also mutated in getOrCreateConnection().
        final Mono<Void> closedExecutor = executor != null ? Mono.defer(() -> {
            synchronized (this) {
                logger.info("Closing executor.");
                return executor.closeAsync();
            }
        }) : Mono.empty();

        final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions)
            .timeout(operationTimeout)
            .onErrorResume(error -> {
                logger.info("Timed out waiting for all sessions to close.");
                return Mono.empty();
            })
            .then(closedExecutor)
            .then(Mono.fromRunnable(() -> {
                isClosedMono.emitEmpty((signalType, result) -> {
                    addSignalTypeAndResult(logger.atWarning(), signalType, result)
                        .log("Unable to emit connection closed signal.");
                    return false;
                });

                subscriptions.dispose();
            }));

        subscriptions.add(closeSessionAndExecutorMono.subscribe());
    }

    /**
     * Lazily creates the claims-based-security (CBS) channel used to authorize links on this connection.
     */
    private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
        if (cbsChannel == null) {
            logger.info("Setting CBS channel.");
            cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS);
            cbsChannel = new ClaimsBasedSecurityChannel(
                cbsChannelProcessor,
                connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(),
                connectionOptions.getRetry());
        }

        return cbsChannel;
    }

    /**
     * Lazily creates the proton-j {@link Connection} plus the reactor executor that pumps it, and wires the
     * dispatcher's shutdown signal into {@link ReactorExceptionHandler}.
     *
     * @throws IOException if the reactor could not be created.
     */
    private synchronized Connection getOrCreateConnection() throws IOException {
        if (connection == null) {
            logger.atInfo()
                .addKeyValue(HOSTNAME_KEY, handler.getHostname())
                .addKeyValue("port", handler.getProtocolPort())
                .log("Creating and starting connection.");

            final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
            connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);

            final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler();

            // Use the smaller of SERVER_BUSY_WAIT_TIME and half the try-timeout for pending reactor tasks.
            final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2);
            final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0
                ? ClientConstants.SERVER_BUSY_WAIT_TIME
                : timeoutDivided;
            final Scheduler scheduler = Schedulers.newSingle("reactor-executor");
            executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler,
                pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace());

            // Deferred so the executor is read under the monitor at close time.
            final Mono<Void> executorCloseMono = Mono.defer(() -> {
                synchronized (this) {
                    return executor.closeAsync();
                }
            });

            reactorProvider.getReactorDispatcher().getShutdownSignal()
                .flatMap(signal -> {
                    reactorExceptionHandler.onConnectionShutdown(signal);
                    return executorCloseMono;
                })
                .onErrorResume(error -> {
                    reactorExceptionHandler.onConnectionError(error);
                    return executorCloseMono;
                })
                .subscribe();

            executor.start();
        }

        return connection;
    }

    /**
     * Routes reactor-level errors and shutdown notifications into this connection's disposal path.
     */
    private final class ReactorExceptionHandler extends AmqpExceptionHandler {
        private ReactorExceptionHandler() {
            super();
        }

        @Override
        public void onConnectionError(Throwable exception) {
            logger.atInfo()
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionError, Starting new reactor", exception);

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onReactorError: Disposing.");

                closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString()))
                    .subscribe();
            }
        }

        @Override
        void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionShutdown. Shutting down.");

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onConnectionShutdown: disposing.");

                closeAsync(shutdownSignal).subscribe();
            }
        }
    }

    /**
     * Pairs an {@link AmqpSession} with the subscription watching its endpoint states, so both can be released
     * together.
     */
    private static final class SessionSubscription {
        private final AtomicBoolean isDisposed = new AtomicBoolean();
        private final AmqpSession session;
        private final Disposable subscription;

        private SessionSubscription(AmqpSession session, Disposable subscription) {
            this.session = session;
            this.subscription = subscription;
        }

        private AmqpSession getSession() {
            return session;
        }

        private void dispose() {
            if (isDisposed.getAndSet(true)) {
                return;
            }

            if (session instanceof ReactorSession) {
                ((ReactorSession) session).closeAsync("Closing session.", null, true)
                    .subscribe();
            } else {
                session.dispose();
            }

            subscription.dispose();
        }

        // Completes when the session reports it has closed; non-ReactorSession types have no such signal.
        private Mono<Void> isClosed() {
            if (session instanceof ReactorSession) {
                return ((ReactorSession) session).isClosed();
            } else {
                return Mono.empty();
            }
        }
    }
}
Thank you, Kun! As you explained earlier, I understand the new shutdown order will help solve the "aggressive log" problem. But I wonder if there is a different way to solve the "aggressive log" problem without changing the shutdown-signal order. [For the new shutdown order, I'm confident in the explicit-close case, but we need to carefully evaluate the side effects in the server-initiated closure (implicit-close) and recovery cases. That requires more thought, given the permutations the service can perform. We could treat the "close timeout" as a separate issue following that evaluation.] So, localizing the discussion to the "aggressive log": is it possible to solve it with minimal impact? For example, instead of `takeUntilOther(shutdownSignalSink.asMono())`, does `.repeat(() -> !this.isDisposed());` work? /cc @conniey
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { final Mono<Void> emitShutdownSignalOperation = Mono.fromRunnable(() -> { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } }); final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes.")), emitShutdownSignalOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Emitted connection shutdown signal. 
"))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
emitShutdownSignalOperation.doFinally(signalType ->
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes."))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat() .takeUntilOther(shutdownSignalSink.asMono()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(() -> !this.isDisposed()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
That makes sense. I would feel comfortable using repeat(() -> !this.isDisposed())
/**
 * Disposes of the connection: emits the shutdown signal, closes the CBS and management channels,
 * then schedules the reactor tear-down, and finally completes when {@code isClosedMono} completes.
 * <p>
 * In this variant the shutdown-signal emission is deferred into a Mono so it runs concurrently with
 * the CBS/management close operations inside {@code Mono.whenDelayError} rather than eagerly at call
 * time.
 *
 * @param shutdownSignal the signal describing why the connection is being closed; replayed to
 *     subscribers of {@code getShutdownSignals()}.
 * @return a Mono that completes when the connection resources have been cleaned up.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Deferred emission: runs only when the whenDelayError composition below is subscribed.
    final Mono<Void> emitShutdownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);
        if (result.isFailure()) {
            // Failure here usually means a signal was already emitted; log and continue closing.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    // cbsChannelProcessor is only set once the CBS node was created; null means nothing to close.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node that was created via getManagementNode(entityPath).
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                // Prefer running the close work on the reactor's own dispatcher thread.
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                // Dispatcher already shut down; fall back to closing on the current thread.
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError: attempt all three close operations even if one of them errors.
    // The reactor itself is only torn down after the channels are closed.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")),
        emitShutdownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
emitShutdownSignalOperation.doFinally(signalType ->
/**
 * Disposes of the connection: emits the shutdown signal immediately, then closes the CBS and
 * management channels, schedules the reactor tear-down, and completes when {@code isClosedMono}
 * completes.
 * <p>
 * Unlike the deferred variant, the shutdown signal is emitted eagerly when this method is CALLED,
 * before the returned Mono is subscribed — downstream consumers (e.g. channel repeat loops keyed on
 * the shutdown signal) observe the signal right away.
 *
 * @param shutdownSignal the signal describing why the connection is being closed; replayed to
 *     subscribers of {@code getShutdownSignals()}.
 * @return a Mono that completes when the connection resources have been cleaned up.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

    final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);
    if (result.isFailure()) {
        // Failure here usually means a signal was already emitted; log and continue closing.
        addShutdownSignal(logger.atInfo(), shutdownSignal)
            .addKeyValue(EMIT_RESULT_KEY, result)
            .log("Unable to emit shutdown signal.");
    }

    // cbsChannelProcessor is only set once the CBS node was created; null means nothing to close.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node that was created via getManagementNode(entityPath).
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                // Prefer running the close work on the reactor's own dispatcher thread.
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                // Dispatcher already shut down; fall back to closing on the current thread.
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError: attempt both close operations even if one of them errors.
    // The reactor itself is only torn down after the channels are closed.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat() .takeUntilOther(shutdownSignalSink.asMono()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(() -> !this.isDisposed()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
Thanks Anu and Connie. Yes, the `.repeat(() -> !this.isDisposed())` approach can solve the "aggressive log" problem. You are right — I hadn't considered the server-initiated closure; I'll check the code to see whether we can add coverage for that case in a future test. For the "close timeout" problem, I'll create a separate issue to follow up. Maybe we can use the no-wait time you mentioned, instead of changing the closing order.
/**
 * Disposes of the connection with the given shutdown signal.
 * <p>
 * Close ordering: the CBS channel close, the management-node closes, and the emission of the shutdown signal are
 * run concurrently (errors delayed via {@code Mono.whenDelayError}); only after all three complete is the
 * connection-teardown work ({@code closeConnectionWork}) scheduled on the reactor dispatcher. The returned mono
 * completes when {@code isClosedMono} is emitted, i.e. when teardown has actually finished.
 *
 * @param shutdownSignal the signal describing why the connection is closing; surfaced to
 *     {@code getShutdownSignals()} subscribers.
 * @return a mono that completes when the connection has been fully closed.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Deferred (Mono.fromRunnable) so the signal is emitted only when the composed close pipeline is subscribed,
    // concurrently with the CBS/management closes below rather than eagerly on method invocation.
    final Mono<Void> emitShutdownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // A failed emission is logged, not thrown — a signal may already have been emitted by another path.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    // cbsChannelProcessor is only set once getOrCreateCBSNode() has run; skip the close when it never existed.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node created via getManagementNode(entityPath).
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // Teardown of the proton-j connection must happen on the reactor thread when the dispatcher is available;
    // fall back to running it inline if scheduling is impossible (dispatcher gone or rejecting work).
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                // Dispatcher is already shutting down; do the work on the calling thread instead.
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError: run all three closes even if one errors, surfacing failures only afterwards.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")),
        emitShutdownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
emitShutdownSignalOperation.doFinally(signalType ->
/**
 * Disposes of the connection with the given shutdown signal.
 * <p>
 * The shutdown signal is emitted eagerly (synchronously, on method invocation) before the asynchronous close
 * pipeline is composed. The CBS channel close and management-node closes then run concurrently (errors delayed
 * via {@code Mono.whenDelayError}); afterwards the connection-teardown work is scheduled on the reactor
 * dispatcher. The returned mono completes when {@code isClosedMono} is emitted.
 *
 * @param shutdownSignal the signal describing why the connection is closing; surfaced to
 *     {@code getShutdownSignals()} subscribers.
 * @return a mono that completes when the connection has been fully closed.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

    // NOTE: emitted immediately, not deferred into the returned mono — subscribers of getShutdownSignals() are
    // notified even if the caller never subscribes to the returned mono.
    final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

    if (result.isFailure()) {
        // A failed emission is logged, not thrown — a signal may already have been emitted by another path.
        addShutdownSignal(logger.atInfo(), shutdownSignal)
            .addKeyValue(EMIT_RESULT_KEY, result)
            .log("Unable to emit shutdown signal.");
    }

    // cbsChannelProcessor is only set once getOrCreateCBSNode() has run; skip the close when it never existed.
    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        cbsCloseOperation = Mono.empty();
    }

    // Close every management node created via getManagementNode(entityPath).
    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // Teardown of the proton-j connection must happen on the reactor thread when the dispatcher is available;
    // fall back to running it inline if scheduling is impossible (dispatcher gone or rejecting work).
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                // Dispatcher is already shutting down; do the work on the calling thread instead.
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError: run both closes even if one errors, surfacing failures only afterwards.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
/**
 * An {@link AmqpConnection} implementation backed by a qpid proton-j {@link Connection} driven by a reactor.
 * <p>
 * Lazily creates the underlying connection on first subscription to {@code connectionMono}, manages AMQP sessions,
 * the claims-based-security (CBS) node, and per-entity management nodes, and tears everything down via
 * {@code closeAsync}. Thread-safety: session/management maps are concurrent; connection/CBS creation is
 * {@code synchronized}; {@code isDisposed} guards against double-dispose.
 */
class ReactorConnection implements AmqpConnection {
    // Well-known session/link names and address for the claims-based-security (CBS) node.
    private static final String CBS_SESSION_NAME = "cbs-session";
    private static final String CBS_ADDRESS = "$cbs";
    private static final String CBS_LINK_NAME = "cbs";

    // Suffixes and address used to build per-entity management-node sessions/links.
    private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
    private static final String MANAGEMENT_ADDRESS = "$management";
    private static final String MANAGEMENT_LINK_NAME = "mgmt";

    private final ClientLogger logger;
    // Active sessions keyed by session name; each value owns the session and its endpoint-state subscription.
    private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>();
    // Management nodes keyed by entity path.
    private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>();

    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Emits at most one shutdown signal when the connection begins closing.
    private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one();
    private final Flux<AmqpEndpointState> endpointStates;
    // Completes when closeConnectionWork() has finished tearing down sessions and the executor.
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();

    private final String connectionId;
    private final Mono<Connection> connectionMono;
    private final ConnectionHandler handler;
    private final ReactorHandlerProvider handlerProvider;
    private final TokenManagerProvider tokenManagerProvider;
    private final MessageSerializer messageSerializer;
    private final ConnectionOptions connectionOptions;
    private final ReactorProvider reactorProvider;
    private final AmqpRetryPolicy retryPolicy;
    private final SenderSettleMode senderSettleMode;
    private final ReceiverSettleMode receiverSettleMode;
    private final Duration operationTimeout;
    private final Composite subscriptions;

    // Created lazily in getOrCreateConnection(); guarded by synchronized methods.
    private ReactorExecutor executor;

    private volatile ClaimsBasedSecurityChannel cbsChannel;
    private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor;
    private volatile Connection connection;

    /**
     * Creates a new AMQP connection that uses proton-j.
     *
     * @param connectionId Identifier for the connection.
     * @param connectionOptions A set of options used to create the AMQP connection.
     * @param reactorProvider Provides proton-j Reactor instances.
     * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
     * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node.
     * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}.
     * @param senderSettleMode to set as {@link SenderSettleMode} on sender.
     * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
     */
    public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider,
        ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider,
        MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
        ReceiverSettleMode receiverSettleMode) {
        this.connectionOptions = connectionOptions;
        this.reactorProvider = reactorProvider;
        this.connectionId = connectionId;
        this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId));
        this.handlerProvider = handlerProvider;
        this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider,
            "'tokenManagerProvider' cannot be null.");
        this.messageSerializer = messageSerializer;
        this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions);

        this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry());
        this.operationTimeout = connectionOptions.getRetry().getTryTimeout();
        this.senderSettleMode = senderSettleMode;
        this.receiverSettleMode = receiverSettleMode;

        // Lazily creates the proton-j connection on subscription, then waits (bounded by the try timeout) for the
        // endpoint to reach ACTIVE before handing the connection to downstream operators.
        this.connectionMono = Mono.fromCallable(this::getOrCreateConnection)
            .flatMap(reactorConnection -> {
                final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates()
                    .filter(state -> state == AmqpEndpointState.ACTIVE)
                    .next()
                    .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format(
                        "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId,
                        operationTimeout), handler.getErrorContext())));
                return activeEndpoint.thenReturn(reactorConnection);
            })
            .doOnError(error -> {
                // Startup failure: initiate close exactly once; subsequent errors only log.
                if (isDisposed.getAndSet(true)) {
                    logger.verbose("Connection was already disposed: Error occurred while connection was starting.",
                        error);
                } else {
                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Error occurred while connection was starting. Error: " + error)).subscribe();
                }
            });

        // Endpoint states from the proton-j handler, terminated when a shutdown signal is emitted; errors and
        // completion both trigger a one-time close. cache(1) replays the latest state to late subscribers.
        this.endpointStates = this.handler.getEndpointStates()
            .takeUntilOther(shutdownSignalSink.asMono())
            .map(state -> {
                logger.verbose("State {}", state);
                return AmqpEndpointStateUtil.getConnectionState(state);
            })
            .onErrorResume(error -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to error.");
                    return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage()))
                        .then(Mono.error(error));
                } else {
                    return Mono.error(error);
                }
            })
            .doOnComplete(() -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to connection close.");

                    closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe();
                }
            })
            .cache(1);

        // Keep the endpoint-state pipeline hot so the close handlers above fire even without external subscribers.
        this.subscriptions = Disposables.composite(this.endpointStates.subscribe());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Flux<AmqpEndpointState> getEndpointStates() {
        return endpointStates;
    }

    /**
     * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed.
     *
     * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is
     *     closed.
     */
    @Override
    public Flux<AmqpShutdownSignal> getShutdownSignals() {
        return shutdownSignalSink.asMono().cache().flux();
    }

    @Override
    public Mono<AmqpManagementNode> getManagementNode(String entityPath) {
        return Mono.defer(() -> {
            if (isDisposed()) {
                return Mono.error(logger.atError()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .log(Exceptions.propagate(new IllegalStateException(
                        "Connection is disposed. Cannot get management instance."))));
            }

            final AmqpManagementNode existing = managementNodes.get(entityPath);
            if (existing != null) {
                return Mono.just(existing);
            }

            final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(),
                connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope())
                .getTokenManager(getClaimsBasedSecurityNode(), entityPath);

            // Authorize first, then atomically create-or-reuse the node for this entity path.
            return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> {
                if (current != null) {
                    logger.info("A management node exists already, returning it.");

                    // Close the token manager we created for the losing race; the existing node owns its own.
                    tokenManager.close();
                    return current;
                }

                final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
                final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
                final String address = entityPath + "/" + MANAGEMENT_ADDRESS;

                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .addKeyValue("address", address)
                    .log("Creating management node.");

                final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel =
                    createRequestResponseChannel(sessionName, linkName, address);
                return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath,
                    tokenManager);
            }));
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() {
        // Requires an active connection before the CBS node can be created.
        return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode()));
    }

    @Override
    public String getId() {
        return connectionId;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getFullyQualifiedNamespace() {
        return handler.getHostname();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getMaxFrameSize() {
        return handler.getMaxFrameSize();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Map<String, Object> getConnectionProperties() {
        return handler.getConnectionProperties();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<AmqpSession> createSession(String sessionName) {
        return connectionMono.map(connection -> {
            // Create-or-reuse the named session; the subscription removes the session from the map on
            // error/completion unless the whole connection is already disposing.
            final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> {
                final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId,
                    getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout());
                final Session session = connection.session();

                BaseHandler.setHandler(session, sessionHandler);
                final AmqpSession amqpSession = createSession(key, session, sessionHandler);
                final Disposable subscription = amqpSession.getEndpointStates()
                    .subscribe(state -> {
                    }, error -> {
                        // If we were already disposed, the session would be removed.
                        if (isDisposed.get()) {
                            return;
                        }

                        logger.atInfo()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Error occurred. Removing and disposing session", error);
                        removeSession(key);
                    }, () -> {
                        // If we were already disposed, the session would be removed.
                        if (isDisposed.get()) {
                            return;
                        }

                        logger.atVerbose()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Complete. Removing and disposing session.");
                        removeSession(key);
                    });

                return new SessionSubscription(amqpSession, subscription);
            });

            return sessionSubscription;
        }).flatMap(sessionSubscription -> {
            // Wait (bounded by the try timeout) for the session endpoint to become ACTIVE before returning it.
            final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates()
                .filter(state -> state == AmqpEndpointState.ACTIVE)
                .next()
                .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true,
                    String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.",
                        connectionId, sessionName), handler.getErrorContext())));

            return activeSession.thenReturn(sessionSubscription.getSession());
        });
    }

    /**
     * Creates a new AMQP session with the given parameters.
     *
     * @param sessionName Name of the AMQP session.
     * @param session The reactor session associated with this session.
     * @param handler Session handler for the reactor session.
     *
     * @return A new instance of AMQP session.
     */
    protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
        return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider,
            getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean removeSession(String sessionName) {
        if (sessionName == null) {
            return false;
        }

        final SessionSubscription removed = sessionMap.remove(sessionName);
        if (removed != null) {
            removed.dispose();
        }

        return removed != null;
    }

    @Override
    public boolean isDisposed() {
        return isDisposed.get();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void dispose() {
        // Blocking close with a budget of twice the operation timeout.
        final Duration timeout = operationTimeout.plus(operationTimeout);
        closeAsync().block(timeout);
    }

    /**
     * Gets the active AMQP connection for this instance.
     *
     * @return The AMQP connection.
     *
     * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given
     *     {@link AmqpRetryOptions
     */
    protected Mono<Connection> getReactorConnection() {
        return connectionMono;
    }

    /**
     * Creates a bidirectional link between the message broker and the client.
     *
     * @param sessionName Name of the session.
     * @param linkName Name of the link.
     * @param entityPath Address to the message broker.
     *
     * @return A new {@link RequestResponseChannel} to communicate with the message broker.
     */
    protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName,
        String linkName, String entityPath) {
        Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");

        // repeat() re-creates the channel each time the previous one terminates; the stream ends only when the
        // connection's shutdown signal is emitted (takeUntilOther).
        final Flux<RequestResponseChannel> createChannel = createSession(sessionName)
            .cast(ReactorSession.class)
            .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName,
                entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
                messageSerializer, senderSettleMode, receiverSettleMode))
            .doOnNext(e -> {
                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .log("Emitting new response channel.");
            })
            .repeat()
            .takeUntilOther(shutdownSignalSink.asMono());

        Map<String, Object> loggingContext = createContextWithConnectionId(connectionId);
        loggingContext.put(ENTITY_PATH_KEY, entityPath);

        return createChannel
            .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(),
                channel -> channel.getEndpointStates(), retryPolicy, loggingContext));
    }

    @Override
    public Mono<Void> closeAsync() {
        // Idempotent: a second call just returns the existing close-completion mono.
        if (isDisposed.getAndSet(true)) {
            logger.verbose("Connection was already closed. Not disposing again.");
            return isClosedMono.asMono();
        }

        return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client."));
    }

    // Tears down the proton-j connection, handler, sessions and executor. Intended to run on the reactor thread
    // (scheduled via the reactor dispatcher); synchronized to guard 'connection' and 'executor'.
    private synchronized void closeConnectionWork() {
        if (connection == null) {
            // Nothing was ever created; just complete the closed signal.
            isClosedMono.emitEmpty((signalType, emitResult) -> {
                addSignalTypeAndResult(logger.atInfo(), signalType, emitResult)
                    .log("Unable to complete closeMono.");
                return false;
            });

            return;
        }

        connection.close();
        handler.close();

        final ArrayList<Mono<Void>> closingSessions = new ArrayList<>();
        sessionMap.values().forEach(link -> closingSessions.add(link.isClosed()));

        // Deferred so the executor's state is read at subscription time, under the lock.
        final Mono<Void> closedExecutor = executor != null ? Mono.defer(() -> {
            synchronized (this) {
                logger.info("Closing executor.");
                return executor.closeAsync();
            }
        }) : Mono.empty();

        // Wait (bounded by the operation timeout) for all sessions to close, then close the executor and emit the
        // final closed signal; a timeout is tolerated rather than propagated.
        final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions)
            .timeout(operationTimeout)
            .onErrorResume(error -> {
                logger.info("Timed out waiting for all sessions to close.");
                return Mono.empty();
            })
            .then(closedExecutor)
            .then(Mono.fromRunnable(() -> {
                isClosedMono.emitEmpty((signalType, result) -> {
                    addSignalTypeAndResult(logger.atWarning(), signalType, result)
                        .log("Unable to emit connection closed signal.");
                    return false;
                });

                subscriptions.dispose();
            }));

        subscriptions.add(closeSessionAndExecutorMono.subscribe());
    }

    // Lazily creates the CBS channel/node; synchronized so only one channel processor is ever created.
    private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
        if (cbsChannel == null) {
            logger.info("Setting CBS channel.");
            cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS);
            cbsChannel = new ClaimsBasedSecurityChannel(
                cbsChannelProcessor,
                connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(),
                connectionOptions.getRetry());
        }

        return cbsChannel;
    }

    // Lazily creates the proton-j reactor, connection and executor; synchronized so they are created exactly once.
    private synchronized Connection getOrCreateConnection() throws IOException {
        if (connection == null) {
            logger.atInfo()
                .addKeyValue(HOSTNAME_KEY, handler.getHostname())
                .addKeyValue("port", handler.getProtocolPort())
                .log("Creating and starting connection.");

            final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
            connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);

            final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler();

            // Pending-task drain window: the smaller of the server-busy wait and half the try timeout.
            final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2);
            final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0
                ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided;
            final Scheduler scheduler = Schedulers.newSingle("reactor-executor");
            executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler,
                pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace());

            // Deferred so the executor is read at subscription time, under the lock.
            final Mono<Void> executorCloseMono = Mono.defer(() -> {
                synchronized (this) {
                    return executor.closeAsync();
                }
            });

            // When the reactor dispatcher shuts down (normally or with an error), notify the exception handler and
            // close the executor.
            reactorProvider.getReactorDispatcher().getShutdownSignal()
                .flatMap(signal -> {
                    reactorExceptionHandler.onConnectionShutdown(signal);
                    return executorCloseMono;
                })
                .onErrorResume(error -> {
                    reactorExceptionHandler.onConnectionError(error);
                    return executorCloseMono;
                })
                .subscribe();

            executor.start();
        }

        return connection;
    }

    // Reacts to reactor-level errors/shutdowns by disposing the connection exactly once.
    private final class ReactorExceptionHandler extends AmqpExceptionHandler {
        private ReactorExceptionHandler() {
            super();
        }

        @Override
        public void onConnectionError(Throwable exception) {
            logger.atInfo()
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionError, Starting new reactor", exception);

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onReactorError: Disposing.");

                closeAsync(new AmqpShutdownSignal(false, false,
                    "onReactorError: " + exception.toString()))
                    .subscribe();
            }
        }

        @Override
        void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionShutdown. Shutting down.");

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onConnectionShutdown: disposing.");

                closeAsync(shutdownSignal).subscribe();
            }
        }
    }

    // Pairs an AmqpSession with the subscription to its endpoint states; disposing closes both exactly once.
    private static final class SessionSubscription {
        private final AtomicBoolean isDisposed = new AtomicBoolean();
        private final AmqpSession session;
        private final Disposable subscription;

        private SessionSubscription(AmqpSession session, Disposable subscription) {
            this.session = session;
            this.subscription = subscription;
        }

        private AmqpSession getSession() {
            return session;
        }

        private void dispose() {
            if (isDisposed.getAndSet(true)) {
                return;
            }

            if (session instanceof ReactorSession) {
                // Graceful asynchronous close for reactor-backed sessions.
                ((ReactorSession) session).closeAsync("Closing session.", null, true)
                    .subscribe();
            } else {
                session.dispose();
            }

            subscription.dispose();
        }

        private Mono<Void> isClosed() {
            if (session instanceof ReactorSession) {
                return ((ReactorSession) session).isClosed();
            } else {
                // Non-reactor sessions provide no close signal; treat as already closed.
                return Mono.empty();
            }
        }
    }
}
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. * * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. 
*/ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. 
*/ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(() -> !this.isDisposed()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? 
ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
Thanks Kun.
/**
 * Closes this connection with the given shutdown signal: emits the signal, closes the CBS channel
 * and all management nodes, then schedules the connection/session/executor teardown on the reactor
 * dispatcher. Returns a {@code Mono} that completes once {@code closeConnectionWork()} has finished
 * (i.e. when {@code isClosedMono} completes).
 *
 * @param shutdownSignal Signal describing why the connection is closing; published to
 *     {@code shutdownSignalSink} so downstream pipelines (endpoint states, channel repeats) stop.
 * @return A {@code Mono} that completes when the connection has fully closed.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // Wrapped in fromRunnable so the signal is only emitted when the returned Mono is subscribed,
    // and so its completion can be logged via doFinally alongside the other close operations.
    final Mono<Void> emitShutdownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // Non-fatal: the sink may already hold a value from an earlier shutdown path.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        // CBS channel was never created; nothing to close.
        cbsCloseOperation = Mono.empty();
    }

    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // Prefer running closeConnectionWork on the reactor dispatcher thread; if scheduling is not
    // possible (I/O failure or dispatcher already shut down), fall back to running it inline.
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError: attempt all three operations even if one fails; only then tear down the
    // reactor, and finally wait for closeConnectionWork to signal completion.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")),
        emitShutdownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
emitShutdownSignalOperation.doFinally(signalType ->
/**
 * Closes this connection with the given shutdown signal: emits the signal, closes the CBS channel
 * and all management nodes, then schedules the connection/session/executor teardown on the reactor
 * dispatcher. Returns a {@code Mono} that completes once {@code closeConnectionWork()} has finished
 * (i.e. when {@code isClosedMono} completes).
 *
 * @param shutdownSignal Signal describing why the connection is closing; published to
 *     {@code shutdownSignalSink} so downstream pipelines stop.
 * @return A {@code Mono} that completes when the connection has fully closed.
 */
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) {
    // FIX: the original emitted the shutdown signal eagerly at call time, violating the cold-Mono
    // contract (side effect before subscription). Deferring it into fromRunnable and composing it
    // into whenDelayError below makes the emission lazy and logged like the other close operations.
    final Mono<Void> emitShutdownSignalOperation = Mono.fromRunnable(() -> {
        addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection.");

        final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);

        if (result.isFailure()) {
            // Non-fatal: the sink may already hold a value from an earlier shutdown path.
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(EMIT_RESULT_KEY, result)
                .log("Unable to emit shutdown signal.");
        }
    });

    final Mono<Void> cbsCloseOperation;
    if (cbsChannelProcessor != null) {
        cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
    } else {
        // CBS channel was never created; nothing to close.
        cbsCloseOperation = Mono.empty();
    }

    final Mono<Void> managementNodeCloseOperations = Mono.when(
        Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync()));

    // Prefer running closeConnectionWork on the reactor dispatcher thread; if scheduling is not
    // possible (I/O failure or dispatcher already shut down), fall back to running it inline.
    final Mono<Void> closeReactor = Mono.fromRunnable(() -> {
        logger.verbose("Scheduling closeConnection work.");
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();

        if (dispatcher != null) {
            try {
                dispatcher.invoke(() -> closeConnectionWork());
            } catch (IOException e) {
                logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
                closeConnectionWork();
            } catch (RejectedExecutionException e) {
                logger.info("Could not schedule closeConnection work. Manually disposing.");
                closeConnectionWork();
            }
        } else {
            closeConnectionWork();
        }
    });

    // whenDelayError: attempt all three operations even if one fails; only then tear down the
    // reactor, and finally wait for closeConnectionWork to signal completion.
    return Mono.whenDelayError(
        cbsCloseOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed CBS node.")),
        managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed management nodes.")),
        emitShutdownSignalOperation.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Emitted connection shutdown signal.")))
        .then(closeReactor.doFinally(signalType -> logger.atVerbose()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .log("Closed reactor dispatcher.")))
        .then(isClosedMono.asMono());
}
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. 
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. */ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); 
}) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final 
SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> {
    final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId,
        getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout());
    final Session session = connection.session();

    BaseHandler.setHandler(session, sessionHandler);
    final AmqpSession amqpSession = createSession(key, session, sessionHandler);

    // Watch the session's endpoint states so that a session which errors or completes is
    // evicted from the cache; a later createSession() call for the same name then rebuilds it.
    final Disposable subscription = amqpSession.getEndpointStates()
        .subscribe(state -> {
        }, error -> {
            // If the connection is disposed, sessions are removed as part of that clean-up.
            if (isDisposed.get()) {
                return;
            }

            logger.atInfo()
                .addKeyValue(SESSION_NAME_KEY, sessionName)
                .log("Error occurred. Removing and disposing session", error);
            removeSession(key);
        }, () -> {
            if (isDisposed.get()) {
                return;
            }

            logger.atVerbose()
                .addKeyValue(SESSION_NAME_KEY, sessionName)
                .log("Complete. Removing and disposing session.");
            removeSession(key);
        });

    return new SessionSubscription(amqpSession, subscription);
});

return sessionSubscription;
}).flatMap(sessionSubscription -> {
    // Only hand the session to the caller once it has transitioned to ACTIVE (or time out).
    final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates()
        .filter(state -> state == AmqpEndpointState.ACTIVE)
        .next()
        .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true,
            String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.",
                connectionId, sessionName), handler.getErrorContext())));

    return activeSession.thenReturn(sessionSubscription.getSession());
});
}

/**
 * Creates a new AMQP session with the given parameters.
 *
 * @param sessionName Name of the AMQP session.
 * @param session The reactor session associated with this session.
 * @param handler Session handler for the reactor session.
 *
 * @return A new instance of AMQP session.
 */
protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
    return new ReactorSession(this, session, handler, sessionName, reactorProvider,
        handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer,
        connectionOptions.getRetry());
}

/**
 * {@inheritDoc}
 */
@Override
public boolean removeSession(String sessionName) {
    if (sessionName == null) {
        return false;
    }

    final SessionSubscription removed = sessionMap.remove(sessionName);
    if (removed != null) {
        removed.dispose();
    }

    return removed != null;
}

@Override
public boolean isDisposed() {
    return isDisposed.get();
}

/**
 * {@inheritDoc}
 */
@Override
public void dispose() {
    // Blocks for twice the operation timeout so both session close and executor close get a chance.
    final Duration timeout = operationTimeout.plus(operationTimeout);
    closeAsync().block(timeout);
}

/**
 * Gets the active AMQP connection for this instance.
 *
 * @return The AMQP connection.
 *
 * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given
 *     {@link AmqpRetryOptions}.
 */
protected Mono<Connection> getReactorConnection() {
    return connectionMono;
}

/**
 * Creates a bidirectional link between the message broker and the client.
 *
 * @param sessionName Name of the session.
 * @param linkName Name of the link.
 * @param entityPath Address to the message broker.
 *
 * @return A new {@link RequestResponseChannel} to communicate with the message broker.
 */
protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName,
    String linkName, String entityPath) {
    Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");

    final Flux<RequestResponseChannel> createChannel = createSession(sessionName)
        .cast(ReactorSession.class)
        .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName,
            entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
            messageSerializer, senderSettleMode, receiverSettleMode))
        .doOnNext(e -> {
            logger.atInfo()
                .addKeyValue(ENTITY_PATH_KEY, entityPath)
                .addKeyValue(LINK_NAME_KEY, linkName)
                .log("Emitting new response channel.");
        })
        // FIX: resubscribe only while this connection is alive. The previous unconditional
        // .repeat().takeUntilOther(shutdownSignalSink.asMono()) could recreate a channel after
        // disposal; gating the repeat on isDisposed() matches the sibling copy of this class in
        // this file.
        .repeat(() -> !this.isDisposed());

    Map<String, Object> loggingContext = createContextWithConnectionId(connectionId);
    loggingContext.put(ENTITY_PATH_KEY, entityPath);

    return createChannel
        .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(),
            channel -> channel.getEndpointStates(), retryPolicy, loggingContext));
}

@Override
public Mono<Void> closeAsync() {
    if (isDisposed.getAndSet(true)) {
        logger.verbose("Connection was already closed. Not disposing again.");
        return isClosedMono.asMono();
    }

    return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client."));
}

// Performs the actual close work: closes the proton-j connection and handler, waits for all
// cached sessions to close, shuts down the executor, then signals isClosedMono.
private synchronized void closeConnectionWork() {
    if (connection == null) {
        isClosedMono.emitEmpty((signalType, emitResult) -> {
            addSignalTypeAndResult(logger.atInfo(), signalType, emitResult)
                .log("Unable to complete closeMono.");
            return false;
        });
        return;
    }

    connection.close();
    handler.close();

    final ArrayList<Mono<Void>> closingSessions = new ArrayList<>();
    sessionMap.values().forEach(link -> closingSessions.add(link.isClosed()));

    // executor is guarded by "this"; re-synchronize inside the deferred close.
    final Mono<Void> closedExecutor = executor != null ? Mono.defer(() -> {
        synchronized (this) {
            logger.info("Closing executor.");
            return executor.closeAsync();
        }
    }) : Mono.empty();

    final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions)
        .timeout(operationTimeout)
        .onErrorResume(error -> {
            logger.info("Timed out waiting for all sessions to close.");
            return Mono.empty();
        })
        .then(closedExecutor)
        .then(Mono.fromRunnable(() -> {
            isClosedMono.emitEmpty((signalType, result) -> {
                addSignalTypeAndResult(logger.atWarning(), signalType, result)
                    .log("Unable to emit connection closed signal.");
                return false;
            });

            subscriptions.dispose();
        }));

    subscriptions.add(closeSessionAndExecutorMono.subscribe());
}

// Lazily creates the CBS channel used to authorize with the claims-based-security node.
private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
    if (cbsChannel == null) {
        logger.info("Setting CBS channel.");
        cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS);
        cbsChannel = new ClaimsBasedSecurityChannel(
            cbsChannelProcessor,
            connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(),
            connectionOptions.getRetry());
    }

    return cbsChannel;
}

// Lazily creates the proton-j connection and starts the reactor executor pumping it.
private synchronized Connection getOrCreateConnection() throws IOException {
    if (connection == null) {
        logger.atInfo()
            .addKeyValue(HOSTNAME_KEY, handler.getHostname())
            .addKeyValue("port", handler.getProtocolPort())
            .log("Creating and starting connection.");

        final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
        connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);

        final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler();

        // Use the smaller of SERVER_BUSY_WAIT_TIME and half the try-timeout for pending tasks.
        final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2);
        final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0
            ? ClientConstants.SERVER_BUSY_WAIT_TIME
            : timeoutDivided;
        final Scheduler scheduler = Schedulers.newSingle("reactor-executor");
        executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler,
            pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace());

        final Mono<Void> executorCloseMono = Mono.defer(() -> {
            synchronized (this) {
                return executor.closeAsync();
            }
        });

        reactorProvider.getReactorDispatcher().getShutdownSignal()
            .flatMap(signal -> {
                reactorExceptionHandler.onConnectionShutdown(signal);
                return executorCloseMono;
            })
            .onErrorResume(error -> {
                reactorExceptionHandler.onConnectionError(error);
                return executorCloseMono;
            })
            .subscribe();

        executor.start();
    }

    return connection;
}

// Bridges reactor-level errors/shutdowns into this connection's dispose flow.
private final class ReactorExceptionHandler extends AmqpExceptionHandler {
    private ReactorExceptionHandler() {
        super();
    }

    @Override
    public void onConnectionError(Throwable exception) {
        logger.atInfo()
            .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
            .log("onConnectionError, Starting new reactor", exception);

        if (!isDisposed.getAndSet(true)) {
            logger.atVerbose()
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onReactorError: Disposing.");

            closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString()))
                .subscribe();
        }
    }

    @Override
    void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
        addShutdownSignal(logger.atInfo(), shutdownSignal)
            .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
            .log("onConnectionShutdown. Shutting down.");

        if (!isDisposed.getAndSet(true)) {
            logger.atVerbose()
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionShutdown: disposing.");

            closeAsync(shutdownSignal).subscribe();
        }
    }
}

// Pairs a cached AmqpSession with the subscription that watches its endpoint states.
private static final class SessionSubscription {
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final AmqpSession session;
    private final Disposable subscription;

    private SessionSubscription(AmqpSession session, Disposable subscription) {
        this.session = session;
        this.subscription = subscription;
    }

    private AmqpSession getSession() {
        return session;
    }

    private void dispose() {
        if (isDisposed.getAndSet(true)) {
            return;
        }

        if (session instanceof ReactorSession) {
            ((ReactorSession) session).closeAsync("Closing session.", null, true)
                .subscribe();
        } else {
            session.dispose();
        }

        subscription.dispose();
    }

    private Mono<Void> isClosed() {
        if (session instanceof ReactorSession) {
            return ((ReactorSession) session).isClosed();
        } else {
            return Mono.empty();
        }
    }
}
}
/**
 * An AMQP connection backed by a proton-j {@link Connection} and reactor. Lazily opens the
 * underlying connection on first subscription to {@code connectionMono}, caches sessions by name,
 * and tears everything down via {@link #closeAsync()} when disposed or when the handler
 * errors/completes.
 */
class ReactorConnection implements AmqpConnection {
    private static final String CBS_SESSION_NAME = "cbs-session";
    private static final String CBS_ADDRESS = "$cbs";
    private static final String CBS_LINK_NAME = "cbs";

    private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
    private static final String MANAGEMENT_ADDRESS = "$management";
    private static final String MANAGEMENT_LINK_NAME = "mgmt";

    private final ClientLogger logger;
    // Sessions cached by name; each entry also carries the endpoint-state watcher subscription.
    private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>();
    private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>();

    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Emits once when the connection shuts down; also terminates endpointStates via takeUntilOther.
    private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one();
    private final Flux<AmqpEndpointState> endpointStates;
    // Completes when close work has finished; what dispose()/closeAsync() ultimately wait on.
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();

    private final String connectionId;
    private final Mono<Connection> connectionMono;
    private final ConnectionHandler handler;
    private final ReactorHandlerProvider handlerProvider;
    private final TokenManagerProvider tokenManagerProvider;
    private final MessageSerializer messageSerializer;
    private final ConnectionOptions connectionOptions;
    private final ReactorProvider reactorProvider;
    private final AmqpRetryPolicy retryPolicy;
    private final SenderSettleMode senderSettleMode;
    private final ReceiverSettleMode receiverSettleMode;
    private final Duration operationTimeout;
    private final Composite subscriptions;

    // Guarded by "this" (created in synchronized getOrCreateConnection, closed under the lock).
    private ReactorExecutor executor;

    private volatile ClaimsBasedSecurityChannel cbsChannel;
    private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor;
    private volatile Connection connection;

    /**
     * Creates a new AMQP connection that uses proton-j.
     *
     * @param connectionId Identifier for the connection.
     * @param connectionOptions A set of options used to create the AMQP connection.
     * @param reactorProvider Provides proton-j Reactor instances.
     * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
     * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node.
     * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}.
     * @param senderSettleMode to set as {@link SenderSettleMode} on sender.
     * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
     */
    public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider,
        ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider,
        MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
        ReceiverSettleMode receiverSettleMode) {

        this.connectionOptions = connectionOptions;
        this.reactorProvider = reactorProvider;
        this.connectionId = connectionId;
        this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId));
        this.handlerProvider = handlerProvider;
        this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider,
            "'tokenManagerProvider' cannot be null.");
        this.messageSerializer = messageSerializer;
        this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions);

        this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry());
        this.operationTimeout = connectionOptions.getRetry().getTryTimeout();
        this.senderSettleMode = senderSettleMode;
        this.receiverSettleMode = receiverSettleMode;

        // Lazily open the connection and only emit it once it reaches ACTIVE (or time out).
        this.connectionMono = Mono.fromCallable(this::getOrCreateConnection)
            .flatMap(reactorConnection -> {
                final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates()
                    .filter(state -> state == AmqpEndpointState.ACTIVE)
                    .next()
                    .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format(
                        "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId,
                        operationTimeout), handler.getErrorContext())));
                return activeEndpoint.thenReturn(reactorConnection);
            })
            .doOnError(error -> {
                if (isDisposed.getAndSet(true)) {
                    logger.verbose("Connection was already disposed: Error occurred while connection was starting.",
                        error);
                } else {
                    closeAsync(new AmqpShutdownSignal(false, false,
                        "Error occurred while connection was starting. Error: " + error)).subscribe();
                }
            });

        // Replay the latest state to late subscribers; any terminal event disposes this connection.
        this.endpointStates = this.handler.getEndpointStates()
            .takeUntilOther(shutdownSignalSink.asMono())
            .map(state -> {
                logger.verbose("State {}", state);
                return AmqpEndpointStateUtil.getConnectionState(state);
            })
            .onErrorResume(error -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to error.");
                    return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error));
                } else {
                    return Mono.error(error);
                }
            })
            .doOnComplete(() -> {
                if (!isDisposed.getAndSet(true)) {
                    logger.verbose("Disposing of active sessions due to connection close.");

                    closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe();
                }
            })
            .cache(1);

        this.subscriptions = Disposables.composite(this.endpointStates.subscribe());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Flux<AmqpEndpointState> getEndpointStates() {
        return endpointStates;
    }

    /**
     * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed.
     *
     * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is
     *     closed.
     */
    @Override
    public Flux<AmqpShutdownSignal> getShutdownSignals() {
        return shutdownSignalSink.asMono().cache().flux();
    }

    @Override
    public Mono<AmqpManagementNode> getManagementNode(String entityPath) {
        return Mono.defer(() -> {
            if (isDisposed()) {
                return Mono.error(logger.atError()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .log(Exceptions.propagate(new IllegalStateException(
                        "Connection is disposed. Cannot get management instance."))));
            }

            final AmqpManagementNode existing = managementNodes.get(entityPath);
            if (existing != null) {
                return Mono.just(existing);
            }

            final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(),
                connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope())
                .getTokenManager(getClaimsBasedSecurityNode(), entityPath);

            // Authorize first; compute() closes the token manager if another caller won the race.
            return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> {
                if (current != null) {
                    logger.info("A management node exists already, returning it.");

                    // Close the token manager we had created during this because it is unneeded now.
                    tokenManager.close();
                    return current;
                }

                final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
                final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
                final String address = entityPath + "/" + MANAGEMENT_ADDRESS;

                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .addKeyValue("address", address)
                    .log("Creating management node.");

                final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel =
                    createRequestResponseChannel(sessionName, linkName, address);
                return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath,
                    tokenManager);
            }));
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() {
        return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode()));
    }

    @Override
    public String getId() {
        return connectionId;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getFullyQualifiedNamespace() {
        return handler.getHostname();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getMaxFrameSize() {
        return handler.getMaxFrameSize();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Map<String, Object> getConnectionProperties() {
        return handler.getConnectionProperties();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<AmqpSession> createSession(String sessionName) {
        return connectionMono.map(connection -> {
            // Reuse a cached session when one exists for this name; otherwise build one and watch
            // its endpoint states so terminal events evict it from the cache.
            final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> {
                final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId,
                    getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout());
                final Session session = connection.session();

                BaseHandler.setHandler(session, sessionHandler);
                final AmqpSession amqpSession = createSession(key, session, sessionHandler);
                final Disposable subscription = amqpSession.getEndpointStates()
                    .subscribe(state -> {
                    }, error -> {
                        // If the connection is disposed, sessions are removed as part of that clean-up.
                        if (isDisposed.get()) {
                            return;
                        }

                        logger.atInfo()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Error occurred. Removing and disposing session", error);
                        removeSession(key);
                    }, () -> {
                        if (isDisposed.get()) {
                            return;
                        }

                        logger.atVerbose()
                            .addKeyValue(SESSION_NAME_KEY, sessionName)
                            .log("Complete. Removing and disposing session.");
                        removeSession(key);
                    });

                return new SessionSubscription(amqpSession, subscription);
            });

            return sessionSubscription;
        }).flatMap(sessionSubscription -> {
            // Only hand the session to the caller once it has transitioned to ACTIVE (or time out).
            final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates()
                .filter(state -> state == AmqpEndpointState.ACTIVE)
                .next()
                .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true,
                    String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.",
                        connectionId, sessionName), handler.getErrorContext())));

            return activeSession.thenReturn(sessionSubscription.getSession());
        });
    }

    /**
     * Creates a new AMQP session with the given parameters.
     *
     * @param sessionName Name of the AMQP session.
     * @param session The reactor session associated with this session.
     * @param handler Session handler for the reactor session.
     *
     * @return A new instance of AMQP session.
     */
    protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
        return new ReactorSession(this, session, handler, sessionName, reactorProvider,
            handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer,
            connectionOptions.getRetry());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean removeSession(String sessionName) {
        if (sessionName == null) {
            return false;
        }

        final SessionSubscription removed = sessionMap.remove(sessionName);
        if (removed != null) {
            removed.dispose();
        }

        return removed != null;
    }

    @Override
    public boolean isDisposed() {
        return isDisposed.get();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void dispose() {
        // Blocks for twice the operation timeout so both session close and executor close get a chance.
        final Duration timeout = operationTimeout.plus(operationTimeout);
        closeAsync().block(timeout);
    }

    /**
     * Gets the active AMQP connection for this instance.
     *
     * @return The AMQP connection.
     *
     * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given
     *     {@link AmqpRetryOptions}.
     */
    protected Mono<Connection> getReactorConnection() {
        return connectionMono;
    }

    /**
     * Creates a bidirectional link between the message broker and the client.
     *
     * @param sessionName Name of the session.
     * @param linkName Name of the link.
     * @param entityPath Address to the message broker.
     *
     * @return A new {@link RequestResponseChannel} to communicate with the message broker.
     */
    protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName,
        String linkName, String entityPath) {
        Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");

        final Flux<RequestResponseChannel> createChannel = createSession(sessionName)
            .cast(ReactorSession.class)
            .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName,
                entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
                messageSerializer, senderSettleMode, receiverSettleMode))
            .doOnNext(e -> {
                logger.atInfo()
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(LINK_NAME_KEY, linkName)
                    .log("Emitting new response channel.");
            })
            // Resubscribe (recreate the channel) only while this connection is not disposed.
            .repeat(() -> !this.isDisposed());

        Map<String, Object> loggingContext = createContextWithConnectionId(connectionId);
        loggingContext.put(ENTITY_PATH_KEY, entityPath);

        return createChannel
            .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(),
                channel -> channel.getEndpointStates(), retryPolicy, loggingContext));
    }

    @Override
    public Mono<Void> closeAsync() {
        if (isDisposed.getAndSet(true)) {
            logger.verbose("Connection was already closed. Not disposing again.");
            return isClosedMono.asMono();
        }

        return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client."));
    }

    // Performs the actual close work: closes the proton-j connection and handler, waits for all
    // cached sessions to close, shuts down the executor, then signals isClosedMono.
    private synchronized void closeConnectionWork() {
        if (connection == null) {
            isClosedMono.emitEmpty((signalType, emitResult) -> {
                addSignalTypeAndResult(logger.atInfo(), signalType, emitResult)
                    .log("Unable to complete closeMono.");
                return false;
            });
            return;
        }

        connection.close();
        handler.close();

        final ArrayList<Mono<Void>> closingSessions = new ArrayList<>();
        sessionMap.values().forEach(link -> closingSessions.add(link.isClosed()));

        // executor is guarded by "this"; re-synchronize inside the deferred close.
        final Mono<Void> closedExecutor = executor != null ? Mono.defer(() -> {
            synchronized (this) {
                logger.info("Closing executor.");
                return executor.closeAsync();
            }
        }) : Mono.empty();

        final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions)
            .timeout(operationTimeout)
            .onErrorResume(error -> {
                logger.info("Timed out waiting for all sessions to close.");
                return Mono.empty();
            })
            .then(closedExecutor)
            .then(Mono.fromRunnable(() -> {
                isClosedMono.emitEmpty((signalType, result) -> {
                    addSignalTypeAndResult(logger.atWarning(), signalType, result)
                        .log("Unable to emit connection closed signal.");
                    return false;
                });

                subscriptions.dispose();
            }));

        subscriptions.add(closeSessionAndExecutorMono.subscribe());
    }

    // Lazily creates the CBS channel used to authorize with the claims-based-security node.
    private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
        if (cbsChannel == null) {
            logger.info("Setting CBS channel.");
            cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS);
            cbsChannel = new ClaimsBasedSecurityChannel(
                cbsChannelProcessor,
                connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(),
                connectionOptions.getRetry());
        }

        return cbsChannel;
    }

    // Lazily creates the proton-j connection and starts the reactor executor pumping it.
    private synchronized Connection getOrCreateConnection() throws IOException {
        if (connection == null) {
            logger.atInfo()
                .addKeyValue(HOSTNAME_KEY, handler.getHostname())
                .addKeyValue("port", handler.getProtocolPort())
                .log("Creating and starting connection.");

            final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
            connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);

            final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler();

            // Use the smaller of SERVER_BUSY_WAIT_TIME and half the try-timeout for pending tasks.
            final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2);
            final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0
                ? ClientConstants.SERVER_BUSY_WAIT_TIME
                : timeoutDivided;
            final Scheduler scheduler = Schedulers.newSingle("reactor-executor");
            executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler,
                pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace());

            final Mono<Void> executorCloseMono = Mono.defer(() -> {
                synchronized (this) {
                    return executor.closeAsync();
                }
            });

            reactorProvider.getReactorDispatcher().getShutdownSignal()
                .flatMap(signal -> {
                    reactorExceptionHandler.onConnectionShutdown(signal);
                    return executorCloseMono;
                })
                .onErrorResume(error -> {
                    reactorExceptionHandler.onConnectionError(error);
                    return executorCloseMono;
                })
                .subscribe();

            executor.start();
        }

        return connection;
    }

    // Bridges reactor-level errors/shutdowns into this connection's dispose flow.
    private final class ReactorExceptionHandler extends AmqpExceptionHandler {
        private ReactorExceptionHandler() {
            super();
        }

        @Override
        public void onConnectionError(Throwable exception) {
            logger.atInfo()
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionError, Starting new reactor", exception);

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onReactorError: Disposing.");

                closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString()))
                    .subscribe();
            }
        }

        @Override
        void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
            addShutdownSignal(logger.atInfo(), shutdownSignal)
                .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                .log("onConnectionShutdown. Shutting down.");

            if (!isDisposed.getAndSet(true)) {
                logger.atVerbose()
                    .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace())
                    .log("onConnectionShutdown: disposing.");

                closeAsync(shutdownSignal).subscribe();
            }
        }
    }

    // Pairs a cached AmqpSession with the subscription that watches its endpoint states.
    private static final class SessionSubscription {
        private final AtomicBoolean isDisposed = new AtomicBoolean();
        private final AmqpSession session;
        private final Disposable subscription;

        private SessionSubscription(AmqpSession session, Disposable subscription) {
            this.session = session;
            this.subscription = subscription;
        }

        private AmqpSession getSession() {
            return session;
        }

        private void dispose() {
            if (isDisposed.getAndSet(true)) {
                return;
            }

            if (session instanceof ReactorSession) {
                ((ReactorSession) session).closeAsync("Closing session.", null, true)
                    .subscribe();
            } else {
                session.dispose();
            }

            subscription.dispose();
        }

        private Mono<Void> isClosed() {
            if (session instanceof ReactorSession) {
                return ((ReactorSession) session).isClosed();
            } else {
                return Mono.empty();
            }
        }
    }
}
Review question: should "wellKnow" (e.g. wellKnowCertificates, wellKnowPath) be spelled "wellKnown", matching the "well-known" certificates it refers to?
/**
 * Creates the key store, reading its configuration from the {@code azure.keyvault.*} system
 * properties and eagerly loading certificates from the JRE, the well-known and custom paths,
 * Key Vault, and the classpath.
 */
public KeyVaultKeyStore() {
    LOGGER.log(FINE, "Constructing KeyVaultKeyStore.");
    creationDate = new Date();
    // Key Vault connection settings; any of these may be null when the property is not set.
    String keyVaultUri = System.getProperty("azure.keyvault.uri");
    String tenantId = System.getProperty("azure.keyvault.tenant-id");
    String clientId = System.getProperty("azure.keyvault.client-id");
    String clientSecret = System.getProperty("azure.keyvault.client-secret");
    String managedIdentity = System.getProperty("azure.keyvault.managed-identity");
    long refreshInterval = getRefreshInterval();
    // Defaults to false when the property is absent (map() short-circuits on null).
    refreshCertificatesWhenHaveUnTrustCertificate =
        Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate")
            .map(System::getProperty)
            .map(Boolean::parseBoolean)
            .orElse(false);
    jreCertificates = JreCertificates.getInstance();
    LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases()));
    wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath);
    LOGGER.log(FINE, String.format("Loaded well known certificates: %s.", wellKnowCertificates.getAliases()));
    customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath);
    LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases()));
    keyVaultCertificates = new KeyVaultCertificates(
        refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity);
    LOGGER.log(FINE, String.format("Loaded Key Vault certificates: %s.", keyVaultCertificates.getAliases()));
    classpathCertificates = new ClasspathCertificates();
    LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases()));
    // Lookup order used throughout the key store: JRE, well-known, custom, Key Vault, classpath.
    allCertificates = Arrays.asList(
        jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates);
}
LOGGER.log(FINE, String.format("Loaded well known certificates: %s.", wellKnowCertificates.getAliases()));
/**
 * Builds the key store from the {@code azure.keyvault.*} system properties, loading certificate
 * sources in order: JRE, well-known path, custom path, Key Vault, classpath.
 */
public KeyVaultKeyStore() {
    LOGGER.log(FINE, "Constructing KeyVaultKeyStore.");
    creationDate = new Date();

    final String vaultUri = System.getProperty("azure.keyvault.uri");
    final String vaultTenantId = System.getProperty("azure.keyvault.tenant-id");
    final String vaultClientId = System.getProperty("azure.keyvault.client-id");
    final String vaultClientSecret = System.getProperty("azure.keyvault.client-secret");
    final String vaultManagedIdentity = System.getProperty("azure.keyvault.managed-identity");
    final long certificateRefreshInterval = getRefreshInterval();

    // Boolean.parseBoolean(null) is false, which matches the previous Optional-based default.
    refreshCertificatesWhenHaveUnTrustCertificate = Boolean.parseBoolean(
        System.getProperty("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate"));

    jreCertificates = JreCertificates.getInstance();
    LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases()));

    wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath);
    LOGGER.log(FINE, String.format("Loaded well known certificates: %s.", wellKnowCertificates.getAliases()));

    customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath);
    LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases()));

    keyVaultCertificates = new KeyVaultCertificates(certificateRefreshInterval, vaultUri, vaultTenantId,
        vaultClientId, vaultClientSecret, vaultManagedIdentity);
    LOGGER.log(FINE, String.format("Loaded Key Vault certificates: %s.", keyVaultCertificates.getAliases()));

    classpathCertificates = new ClasspathCertificates();
    LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases()));

    allCertificates = Arrays.asList(jreCertificates, wellKnowCertificates, customCertificates,
        keyVaultCertificates, classpathCertificates);
}
class KeyVaultKeyStore extends KeyStoreSpi {

    /**
     * Stores the key-store name.
     */
    public static final String KEY_STORE_TYPE = "AzureKeyVault";

    /**
     * Stores the algorithm name.
     */
    public static final String ALGORITHM_NAME = KEY_STORE_TYPE;

    /**
     * Stores the logger.
     */
    private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName());

    /**
     * Stores the Jre key store certificates.
     */
    private final JreCertificates jreCertificates;

    /**
     * Store well Know certificates loaded from specific path.
     */
    private final SpecificPathCertificates wellKnowCertificates;

    /**
     * Store custom certificates loaded from specific path.
     */
    private final SpecificPathCertificates customCertificates;

    /**
     * Store certificates loaded from KeyVault.
     */
    private final KeyVaultCertificates keyVaultCertificates;

    /**
     * Store certificates loaded from classpath.
     */
    private final ClasspathCertificates classpathCertificates;

    /**
     * Stores all the certificates, in lookup order: JRE, well-known, custom, Key Vault, classpath.
     */
    private final List<AzureCertificates> allCertificates;

    /**
     * Stores the creation date.
     */
    private final Date creationDate;

    /**
     * Whether a failed lookup should trigger a Key Vault certificate refresh and retry.
     */
    private final boolean refreshCertificatesWhenHaveUnTrustCertificate;

    /**
     * Store the path where the well know certificate is placed
     */
    final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known"))
        .orElse("/etc/certs/well-known/");

    /**
     * Store the path where the custom certificate is placed
     */
    final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom"))
        .orElse("/etc/certs/custom/");

    /**
     * Constructor.
     *
     * <p>
     * The constructor uses System.getProperty for
     * <code>azure.keyvault.uri</code>,
     * <code>azure.keyvault.aadAuthenticationUrl</code>,
     * <code>azure.keyvault.tenantId</code>,
     * <code>azure.keyvault.clientId</code>,
     * <code>azure.keyvault.clientSecret</code> and
     * <code>azure.keyvault.managedIdentity</code> to initialize the
     * Key Vault client.
     * </p>
     */
    Long getRefreshInterval() {
        // First matching property wins; 0 disables periodic refresh.
        return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms",
                "azure.keyvault.jca.certificates-refresh-interval")
            .map(System::getProperty)
            .filter(Objects::nonNull)
            .map(Long::valueOf)
            .findFirst()
            .orElse(0L);
    }

    /**
     * get key vault key store by system property
     *
     * @return KeyVault key store
     * @throws CertificateException if any of the certificates in the
     *     keystore could not be loaded
     * @throws NoSuchAlgorithmException when algorithm is unavailable.
     * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type
     * @throws IOException when an I/O error occurs.
     */
    public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException,
        NoSuchAlgorithmException, KeyStoreException, IOException {
        KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME);
        KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter(
            System.getProperty("azure.keyvault.uri"),
            System.getProperty("azure.keyvault.tenant-id"),
            System.getProperty("azure.keyvault.client-id"),
            System.getProperty("azure.keyvault.client-secret"),
            System.getProperty("azure.keyvault.managed-identity"));
        keyStore.load(parameter);
        return keyStore;
    }

    @Override
    public Enumeration<String> engineAliases() {
        return Collections.enumeration(getAllAliases());
    }

    @Override
    public boolean engineContainsAlias(String alias) {
        return engineIsCertificateEntry(alias);
    }

    @Override
    public void engineDeleteEntry(String alias) {
        allCertificates.forEach(a -> a.deleteEntry(alias));
    }

    @Override
    public boolean engineEntryInstanceOf(String alias, Class<? extends KeyStore.Entry> entryClass) {
        return super.engineEntryInstanceOf(alias, entryClass);
    }

    @Override
    public Certificate engineGetCertificate(String alias) {
        // First store that knows the alias wins (JRE, well-known, custom, Key Vault, classpath).
        Certificate certificate = allCertificates.stream()
            .map(AzureCertificates::getCertificates)
            .filter(a -> a.containsKey(alias))
            .findFirst()
            .map(certificates -> certificates.get(alias))
            .orElse(null);
        if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) {
            keyVaultCertificates.refreshCertificates();
            certificate = keyVaultCertificates.getCertificates().get(alias);
        }
        return certificate;
    }

    @Override
    public String engineGetCertificateAlias(Certificate cert) {
        String alias = null;
        if (cert != null) {
            List<String> aliasList = getAllAliases();
            for (String candidateAlias : aliasList) {
                Certificate certificate = engineGetCertificate(candidateAlias);
                // FIX: compare from 'cert' (known non-null here). engineGetCertificate can return
                // null when an alias disappears between getAllAliases() and the lookup (e.g. a
                // Key Vault refresh), which previously threw a NullPointerException on
                // certificate.equals(cert).
                if (cert.equals(certificate)) {
                    alias = candidateAlias;
                    break;
                }
            }
        }
        if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) {
            alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert);
        }
        return alias;
    }

    @Override
    public Certificate[] engineGetCertificateChain(String alias) {
        // Key Vault leaf certificates are exposed as a single-element chain.
        Certificate[] chain = null;
        Certificate certificate = engineGetCertificate(alias);
        if (certificate != null) {
            chain = new Certificate[1];
            chain[0] = certificate;
        }
        return chain;
    }

    @Override
    public Date engineGetCreationDate(String alias) {
        // All entries report the key store's construction time; return a defensive copy.
        return new Date(creationDate.getTime());
    }

    @Override
    public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam)
        throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException {
        return super.engineGetEntry(alias, protParam);
    }

    @Override
    public Key engineGetKey(String alias, char[] password) {
        return allCertificates.stream()
            .map(AzureCertificates::getCertificateKeys)
            .filter(a -> a.containsKey(alias))
            .findFirst()
            .map(certificateKeys -> certificateKeys.get(alias))
            .orElse(null);
    }

    @Override
    public boolean engineIsCertificateEntry(String alias) {
        return getAllAliases().contains(alias);
    }

    @Override
    public boolean engineIsKeyEntry(String alias) {
        return engineIsCertificateEntry(alias);
    }

    @Override
    public void engineLoad(KeyStore.LoadStoreParameter param) {
        if (param instanceof KeyVaultLoadStoreParameter) {
            KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param;
            keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(),
                parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity());
        }
        classpathCertificates.loadCertificatesFromClasspath();
    }

    @Override
    public void engineLoad(InputStream stream, char[] password) {
        classpathCertificates.loadCertificatesFromClasspath();
    }

    // Merges aliases from every certificate source, preferring the first occurrence and logging
    // duplicates at FINE.
    private List<String> getAllAliases() {
        List<String> allAliases = new ArrayList<>(jreCertificates.getAliases());
        Map<String, List<String>> aliasLists = new HashMap<>();
        aliasLists.put("well known certificates", wellKnowCertificates.getAliases());
        aliasLists.put("custom certificates", customCertificates.getAliases());
        aliasLists.put("key vault certificates", keyVaultCertificates.getAliases());
        aliasLists.put("class path certificates", classpathCertificates.getAliases());

        aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> {
            if (allAliases.contains(alias)) {
                LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType));
            } else {
                allAliases.add(alias);
            }
        }));
        return allAliases;
    }

    @Override
    public void engineSetCertificateEntry(String alias, Certificate certificate) {
        if (getAllAliases().contains(alias)) {
            LOGGER.log(WARNING, "Cannot overwrite own certificate");
            return;
        }
        classpathCertificates.setCertificateEntry(alias, certificate);
    }

    @Override
    public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam)
        throws KeyStoreException {
        super.engineSetEntry(alias, entry, protParam);
    }

    // Key material is read-only; mutating operations are intentionally no-ops.
    @Override
    public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] chain) {
    }

    @Override
    public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) {
    }

    @Override
    public int engineSize() {
        return getAllAliases().size();
    }

    // Persisting the key store is intentionally unsupported; both store operations are no-ops.
    @Override
    public void engineStore(OutputStream stream, char[] password) {
    }

    @Override
    public void engineStore(KeyStore.LoadStoreParameter param) {
    }
}
/**
 * A {@link KeyStoreSpi} implementation that aggregates certificates from five
 * sources and exposes them through the standard {@link KeyStore} API:
 * the JRE trust store, a "well known" file-system path, a "custom"
 * file-system path, Azure Key Vault, and the classpath.
 *
 * <p>Configuration is read from system properties:
 * {@code azure.keyvault.uri}, {@code azure.keyvault.tenant-id},
 * {@code azure.keyvault.client-id}, {@code azure.keyvault.client-secret},
 * {@code azure.keyvault.managed-identity}, {@code azure.cert-path.well-known}
 * and {@code azure.cert-path.custom}.</p>
 */
class KeyVaultKeyStore extends KeyStoreSpi {

    /**
     * Stores the key-store name.
     */
    public static final String KEY_STORE_TYPE = "AzureKeyVault";

    /**
     * Stores the algorithm name.
     */
    public static final String ALGORITHM_NAME = KEY_STORE_TYPE;

    /**
     * Stores the logger.
     */
    private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName());

    /**
     * Stores the JRE key store certificates.
     */
    private final JreCertificates jreCertificates;

    /**
     * Stores well-known certificates loaded from a specific path.
     */
    private final SpecificPathCertificates wellKnowCertificates;

    /**
     * Stores custom certificates loaded from a specific path.
     */
    private final SpecificPathCertificates customCertificates;

    /**
     * Stores certificates loaded from Key Vault.
     */
    private final KeyVaultCertificates keyVaultCertificates;

    /**
     * Stores certificates loaded from the classpath.
     */
    private final ClasspathCertificates classpathCertificates;

    /**
     * Stores all the certificate sources; lookup order is the list order.
     */
    private final List<AzureCertificates> allCertificates;

    /**
     * Stores the creation date.
     */
    private final Date creationDate;

    /**
     * When true, a lookup miss triggers a Key Vault cache refresh before
     * giving up (see engineGetCertificate / engineGetCertificateAlias).
     */
    private final boolean refreshCertificatesWhenHaveUnTrustCertificate;

    /**
     * Store the path where the well known certificate is placed.
     */
    final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known"))
        .orElse("/etc/certs/well-known/");

    /**
     * Store the path where the custom certificate is placed.
     */
    final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom"))
        .orElse("/etc/certs/custom/");

    /**
     * Returns the certificate refresh interval in milliseconds.
     *
     * <p>The first non-null value of the two supported system properties wins;
     * when neither is set the interval defaults to {@code 0} (no refresh).</p>
     *
     * @return the refresh interval in milliseconds, never null
     */
    Long getRefreshInterval() {
        return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms",
                "azure.keyvault.jca.certificates-refresh-interval")
            .map(System::getProperty)
            .filter(Objects::nonNull)
            .map(Long::valueOf)
            .findFirst()
            .orElse(0L);
    }

    /**
     * get key vault key store by system property
     *
     * @return KeyVault key store
     * @throws CertificateException if any of the certificates in the
     *     keystore could not be loaded
     * @throws NoSuchAlgorithmException when algorithm is unavailable.
     * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type
     * @throws IOException when an I/O error occurs.
     */
    public static KeyStore getKeyVaultKeyStoreBySystemProperty()
            throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException {
        KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME);
        KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter(
            System.getProperty("azure.keyvault.uri"),
            System.getProperty("azure.keyvault.tenant-id"),
            System.getProperty("azure.keyvault.client-id"),
            System.getProperty("azure.keyvault.client-secret"),
            System.getProperty("azure.keyvault.managed-identity"));
        keyStore.load(parameter);
        return keyStore;
    }

    @Override
    public Enumeration<String> engineAliases() {
        return Collections.enumeration(getAllAliases());
    }

    @Override
    public boolean engineContainsAlias(String alias) {
        return engineIsCertificateEntry(alias);
    }

    @Override
    public void engineDeleteEntry(String alias) {
        // Delete from every source; sources that do not hold the alias are no-ops.
        allCertificates.forEach(a -> a.deleteEntry(alias));
    }

    @Override
    public boolean engineEntryInstanceOf(String alias, Class<? extends KeyStore.Entry> entryClass) {
        return super.engineEntryInstanceOf(alias, entryClass);
    }

    /**
     * Returns the certificate for the given alias from the first source that
     * holds it, optionally refreshing the Key Vault cache on a miss.
     */
    @Override
    public Certificate engineGetCertificate(String alias) {
        Certificate certificate = allCertificates.stream()
            .map(AzureCertificates::getCertificates)
            .filter(a -> a.containsKey(alias))
            .findFirst()
            .map(certificates -> certificates.get(alias))
            .orElse(null);
        if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) {
            keyVaultCertificates.refreshCertificates();
            certificate = keyVaultCertificates.getCertificates().get(alias);
        }
        return certificate;
    }

    /**
     * Returns the alias whose certificate equals {@code cert}, or null.
     */
    @Override
    public String engineGetCertificateAlias(Certificate cert) {
        String alias = null;
        if (cert != null) {
            for (String candidateAlias : getAllAliases()) {
                // Fix: compare as cert.equals(...) which is null-safe.
                // engineGetCertificate can return null (its lookup ends in
                // .orElse(null)), so the original certificate.equals(cert)
                // could throw a NullPointerException mid-scan.
                if (cert.equals(engineGetCertificate(candidateAlias))) {
                    alias = candidateAlias;
                    break;
                }
            }
        }
        if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) {
            alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert);
        }
        return alias;
    }

    @Override
    public Certificate[] engineGetCertificateChain(String alias) {
        // Only a single-element chain is ever produced from the cached certificate.
        Certificate[] chain = null;
        Certificate certificate = engineGetCertificate(alias);
        if (certificate != null) {
            chain = new Certificate[1];
            chain[0] = certificate;
        }
        return chain;
    }

    @Override
    public Date engineGetCreationDate(String alias) {
        // Defensive copy; the same store-creation date is reported for every alias.
        return new Date(creationDate.getTime());
    }

    @Override
    public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam)
            throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException {
        return super.engineGetEntry(alias, protParam);
    }

    @Override
    public Key engineGetKey(String alias, char[] password) {
        // The password is ignored; keys come from the first source holding the alias.
        return allCertificates.stream()
            .map(AzureCertificates::getCertificateKeys)
            .filter(a -> a.containsKey(alias))
            .findFirst()
            .map(certificateKeys -> certificateKeys.get(alias))
            .orElse(null);
    }

    @Override
    public boolean engineIsCertificateEntry(String alias) {
        return getAllAliases().contains(alias);
    }

    @Override
    public boolean engineIsKeyEntry(String alias) {
        return engineIsCertificateEntry(alias);
    }

    @Override
    public void engineLoad(KeyStore.LoadStoreParameter param) {
        if (param instanceof KeyVaultLoadStoreParameter) {
            KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param;
            keyVaultCertificates.updateKeyVaultClient(parameter.getUri(),
                parameter.getTenantId(),
                parameter.getClientId(),
                parameter.getClientSecret(),
                parameter.getManagedIdentity());
        }
        classpathCertificates.loadCertificatesFromClasspath();
    }

    @Override
    public void engineLoad(InputStream stream, char[] password) {
        // Stream and password are ignored; only classpath certificates are (re)loaded.
        classpathCertificates.loadCertificatesFromClasspath();
    }

    /**
     * Collects the aliases of every source, JRE aliases first, skipping
     * duplicates (first source wins) and logging each skipped duplicate.
     */
    private List<String> getAllAliases() {
        List<String> allAliases = new ArrayList<>(jreCertificates.getAliases());
        Map<String, List<String>> aliasLists = new HashMap<>();
        aliasLists.put("well known certificates", wellKnowCertificates.getAliases());
        aliasLists.put("custom certificates", customCertificates.getAliases());
        aliasLists.put("key vault certificates", keyVaultCertificates.getAliases());
        aliasLists.put("class path certificates", classpathCertificates.getAliases());
        aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> {
            if (allAliases.contains(alias)) {
                LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType));
            } else {
                allAliases.add(alias);
            }
        }));
        return allAliases;
    }

    @Override
    public void engineSetCertificateEntry(String alias, Certificate certificate) {
        if (getAllAliases().contains(alias)) {
            LOGGER.log(WARNING, "Cannot overwrite own certificate");
            return;
        }
        classpathCertificates.setCertificateEntry(alias, certificate);
    }

    @Override
    public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam)
            throws KeyStoreException {
        super.engineSetEntry(alias, entry, protParam);
    }

    @Override
    public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] chain) {
        // Intentionally unsupported: keys cannot be stored through this key store.
    }

    @Override
    public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) {
        // Intentionally unsupported: keys cannot be stored through this key store.
    }

    @Override
    public int engineSize() {
        return getAllAliases().size();
    }

    @Override
    public void engineStore(OutputStream stream, char[] password) {
        // Intentionally a no-op: this key store is read-only and cannot be persisted.
    }

    @Override
    public void engineStore(KeyStore.LoadStoreParameter param) {
        // Intentionally a no-op: this key store is read-only and cannot be persisted.
    }
}
Both are OK.
/**
 * Constructor.
 *
 * <p>Reads the {@code azure.keyvault.*} system properties to configure the
 * Key Vault client, then eagerly loads certificates from the JRE store, the
 * well-known and custom paths, Key Vault, and the classpath, logging the
 * aliases found in each source at FINE level.</p>
 */
public KeyVaultKeyStore() {
    LOGGER.log(FINE, "Constructing KeyVaultKeyStore.");
    creationDate = new Date();
    String keyVaultUri = System.getProperty("azure.keyvault.uri");
    String tenantId = System.getProperty("azure.keyvault.tenant-id");
    String clientId = System.getProperty("azure.keyvault.client-id");
    String clientSecret = System.getProperty("azure.keyvault.client-secret");
    String managedIdentity = System.getProperty("azure.keyvault.managed-identity");
    long refreshInterval = getRefreshInterval();
    // Boolean.parseBoolean(null) is false, so this is exactly equivalent to the
    // previous Optional.of(name).map(System::getProperty).map(Boolean::parseBoolean)
    // .orElse(false) chain, without the indirection.
    refreshCertificatesWhenHaveUnTrustCertificate = Boolean.parseBoolean(
        System.getProperty("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate"));
    jreCertificates = JreCertificates.getInstance();
    LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases()));
    wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath);
    LOGGER.log(FINE, String.format("Loaded well known certificates: %s.", wellKnowCertificates.getAliases()));
    customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath);
    LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases()));
    keyVaultCertificates = new KeyVaultCertificates(
        refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity);
    LOGGER.log(FINE, String.format("Loaded Key Vault certificates: %s.", keyVaultCertificates.getAliases()));
    classpathCertificates = new ClasspathCertificates();
    LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases()));
    // Lookup-priority order used by the engine* methods.
    allCertificates = Arrays.asList(
        jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates);
}
LOGGER.log(FINE, String.format("Loaded well known certificates: %s.", wellKnowCertificates.getAliases()));
// Constructor: reads the azure.keyvault.* system properties, resolves the
// refresh-on-untrusted-certificate flag, and eagerly loads certificates from
// the JRE store, the well-known/custom paths, Key Vault, and the classpath,
// logging each source's aliases at FINE level.
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well known certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded Key Vault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
// KeyVaultKeyStore: a KeyStoreSpi that aggregates certificates from the JRE
// trust store, the well-known/custom file-system paths, Azure Key Vault, and
// the classpath, exposing them through the standard KeyStore API.
// NOTE(review): engineGetCertificateAlias invokes certificate.equals(cert) on
// the value returned by engineGetCertificate, which is produced via
// .orElse(null) — a null result would throw a NullPointerException; verify.
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. 
* </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return 
getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) { LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] 
chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
// KeyVaultKeyStore: a KeyStoreSpi that aggregates certificates from the JRE
// trust store, the well-known/custom file-system paths, Azure Key Vault, and
// the classpath, exposing them through the standard KeyStore API.
// NOTE(review): engineGetCertificateAlias invokes certificate.equals(cert) on
// the value returned by engineGetCertificate, which is produced via
// .orElse(null) — a null result would throw a NullPointerException; verify.
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. 
* </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return 
getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) { LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] 
chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
Typo: the field `wellKnowCertificates` should be spelled `wellKnow**n**Certificates` (missing "n"); the same applies to `wellKnowPath`.
// Constructor: reads the azure.keyvault.* system properties, resolves the
// refresh-on-untrusted-certificate flag, and eagerly loads certificates from
// the JRE store, the well-known/custom paths, Key Vault, and the classpath,
// logging each source's aliases at FINE level.
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well known certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded Key Vault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
LOGGER.log(FINE, String.format("Loaded well known certificates: %s.", wellKnowCertificates.getAliases()));
// Constructor: reads the azure.keyvault.* system properties, resolves the
// refresh-on-untrusted-certificate flag, and eagerly loads certificates from
// the JRE store, the well-known/custom paths, Key Vault, and the classpath,
// logging each source's aliases at FINE level.
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well known certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded Key Vault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. 
* </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return 
getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) { LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] 
chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. 
* </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return 
getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) { LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] 
chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
Changed.
public void canCreateVirtualMachineWithEphemeralOSDisk() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_D8S_V3) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.osDiskIsEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); }
.withSize(VirtualMachineSizeTypes.STANDARD_D8S_V3)
public void canCreateVirtualMachineWithEphemeralOSDisk() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.isOSDiskEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); }
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); 
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", 
foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( 
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } 
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
                    + " Please stop/deallocate the VM and retry the operation."));
}
// Clean up: delete the VM and the first availability set.
computeManager.virtualMachines().deleteById(foundVM.id());
computeManager.availabilitySets().deleteById(setCreated.id());
}

@Test
public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception {
    // Create an availability set together with a brand-new proximity placement group (PPG).
    AvailabilitySet setCreated =
        computeManager
            .availabilitySets()
            .define(availabilitySetName)
            .withRegion(regionProxPlacementGroup)
            .withNewResourceGroup(rgName)
            .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
            .create();
    // The set must be the PPG's first registered member and share its location.
    Assertions.assertEquals(availabilitySetName, setCreated.name());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());
    // Create a Windows VM into the same PPG.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(regionProxPlacementGroup)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .create();
    // Locate the created VM by listing the resource group.
    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = 
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey);
        Assertions.assertNotNull(createdPublicIpAddress);
        Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name()));
    }
}

@Test
public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception {
    String vmNamePrefix = "vmz";
    String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
    String networkNamePrefix = generateRandomResourceName("vnet-", 15);
    int count = 5;
    // Pre-compute the expected names for VMs, networks, and public IPs.
    final Set<String> virtualMachineNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
    }
    final Set<String> networkNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        networkNames.add(String.format("%s-%d", networkNamePrefix, i));
    }
    final Set<String> publicIPAddressNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
    }
    final CreatablesInfo creatablesInfo =
        prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
    // Counts every resource emitted by the reactive create stream.
    final AtomicInteger resourceCount = new AtomicInteger(0);
    List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
    // Create asynchronously and validate each resource as it streams back.
    computeManager
        .virtualMachines()
        .createAsync(virtualMachineCreatables)
        .map(
            createdResource -> {
                if (createdResource instanceof Resource) {
                    Resource resource = (Resource) createdResource;
                    System.out.println("Created: " + resource.id());
                    if (resource instanceof VirtualMachine) {
                        VirtualMachine virtualMachine = (VirtualMachine) resource;
                        Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
                        Assertions.assertNotNull(virtualMachine.id());
                    } else if (resource instanceof Network) {
                        Network network = (Network) resource;
                        Assertions.assertTrue(networkNames.contains(network.name()));
                        Assertions.assertNotNull(network.id());
                    } else if (resource instanceof PublicIpAddress) {
                        PublicIpAddress
publicIPAddress = (PublicIpAddress) resource;
                        Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name()));
                        Assertions.assertNotNull(publicIPAddress.id());
                    }
                }
                resourceCount.incrementAndGet();
                return createdResource;
            })
        .blockLast();
    // After the stream completes, every expected network and public IP must exist.
    networkNames.forEach(name -> {
        Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name));
    });
    publicIPAddressNames.forEach(name -> {
        Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name));
    });
    Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count());
    Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count());
    // NOTE(review): presumably createAsync emits one item per VM here — confirm the stream contract.
    Assertions.assertEquals(count, resourceCount.get());
}

@Test
public void canSetStorageAccountForUnmanagedDisk() {
    // Premium storage account that will hold the unmanaged data-disk VHDs.
    final String storageName = generateRandomResourceName("st", 14);
    StorageAccount storageAccount =
        storageManager
            .storageAccounts()
            .define(storageName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withSku(StorageAccountSkuType.PREMIUM_LRS)
            .create();
    // VM with two new unmanaged data disks stored at explicit container/blob locations.
    VirtualMachine virtualMachine =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withUnmanagedDisks()
            .defineUnmanagedDataDisk("disk1")
                .withNewVhd(100)
                .withLun(2)
                .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
                .attach()
            .defineUnmanagedDataDisk("disk2")
                .withNewVhd(100)
                .withLun(3)
                .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
                .attach()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .create();
    // Data disks are keyed by LUN (2 and 3 as configured above).
    Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
    Assertions.assertEquals(2, unmanagedDataDisks.size());
    // Look the disks up by the LUNs assigned at creation time.
    VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2);
    Assertions.assertNotNull(firstUnmanagedDataDisk);
    VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3);
    Assertions.assertNotNull(secondUnmanagedDataDisk);
    // Remember the VHD URIs so the blobs can be re-attached after the VM is deleted.
    String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri();
    String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri();
    Assertions.assertNotNull(createdVhdUri1);
    Assertions.assertNotNull(createdVhdUri2);
    computeManager.virtualMachines().deleteById(virtualMachine.id());
    // Re-create a VM attaching the FIRST existing VHD (deleting the VM leaves the blobs behind).
    virtualMachine =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withUnmanagedDisks()
            .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
            .create();
    virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
    unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
    Assertions.assertNotNull(unmanagedDataDisks);
    Assertions.assertEquals(1, unmanagedDataDisks.size());
    // Grab the single attached disk (size was just asserted to be 1).
    firstUnmanagedDataDisk = null;
    for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) {
        firstUnmanagedDataDisk = unmanagedDisk;
        break;
    }
    Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri());
    Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1));
    // Attach the SECOND existing VHD via update.
    virtualMachine
        .update()
        .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
        .apply();
    virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
    unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
    Assertions.assertEquals(2, unmanagedDataDisks.size());
}

@Test
public void canUpdateTagsOnVM() {
    // Create a minimal Linux VM to exercise tag updates.
    VirtualMachine virtualMachine =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("firstuser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .create();
    // Add a single tag.
    virtualMachine.update().withTag("test", "testValue").apply();
    Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test"));
    // Replace the whole tag map.
    Map<String, String> testTags = new HashMap<String, String>();
    testTags.put("testTag", "testValue");
    virtualMachine.update().withTags(testTags).apply();
    Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag"));
}

@Test
public void canRunScriptOnVM() {
    // Create a Linux VM and run a shell script on it via the RunCommand feature.
    VirtualMachine virtualMachine =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("firstuser")
            .withSsh(sshPublicKey())
            .create();
    List<String> installGit = new ArrayList<>();
    installGit.add("sudo apt-get update");
    installGit.add("sudo apt-get install -y git");
    RunCommandResult runResult =
        virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>());
    Assertions.assertNotNull(runResult);
    Assertions.assertNotNull(runResult.value());
    Assertions.assertTrue(runResult.value().size() > 0);
}

@Test
@DoNotRecord(skipInPlayback = true)
public void canPerformSimulateEvictionOnSpotVirtualMachine() {
    VirtualMachine virtualMachine =
        computeManager.virtualMachines()
            .define(vmName)
.withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("firstuser")
            .withSsh(sshPublicKey())
            // Spot VM whose eviction policy deallocates (rather than deletes) it.
            .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .create();
    // While running, the OS disk is attached and reports its storage type/size.
    Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType());
    Assertions.assertTrue(virtualMachine.osDiskSize() > 0);
    Disk disk = computeManager.disks().getById(virtualMachine.osDiskId());
    Assertions.assertNotNull(disk);
    Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState());
    // Trigger a simulated spot eviction, then poll (up to ~30 min) for deallocation.
    virtualMachine.simulateEviction();
    boolean deallocated = false;
    int pollIntervalInMinutes = 5;
    for (int i = 0; i < 30; i += pollIntervalInMinutes) {
        ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes));
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        if (virtualMachine.powerState() == PowerState.DEALLOCATED) {
            deallocated = true;
            break;
        }
    }
    Assertions.assertTrue(deallocated);
    // After eviction the VM reports no OS-disk storage info and the disk becomes RESERVED.
    virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
    Assertions.assertNotNull(virtualMachine);
    Assertions.assertNull(virtualMachine.osDiskStorageAccountType());
    Assertions.assertEquals(0, virtualMachine.osDiskSize());
    disk = computeManager.disks().getById(virtualMachine.osDiskId());
    Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState());
}

@Test
public void canForceDeleteVirtualMachine() {
    computeManager.virtualMachines()
        .define(vmName)
        .withRegion("eastus2euap")
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .create();
    VirtualMachine
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(virtualMachine);
    Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region());
    String nicId = virtualMachine.primaryNetworkInterfaceId();
    // Force-delete the VM (second argument 'true' = forceDeletion).
    computeManager.virtualMachines().deleteById(virtualMachine.id(), true);
    // The VM must be gone (404) ...
    try {
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
    } catch (ManagementException ex) {
        virtualMachine = null;
        Assertions.assertEquals(404, ex.getResponse().getStatusCode());
    }
    Assertions.assertNull(virtualMachine);
    // ... but its NIC survives force deletion.
    NetworkInterface nic = networkManager.networkInterfaces().getById(nicId);
    Assertions.assertNotNull(nic);
}

@Test
public void canCreateVirtualMachineWithDeleteOption() throws Exception {
    // NOTE: local 'region' intentionally shadows the class field for this test.
    Region region = Region.US_WEST2;
    final String publicIpDnsLabel = generateRandomResourceName("pip", 20);
    Network network =
        this
            .networkManager
            .networks()
            .define("network1")
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/24")
            .withSubnet("default", "10.0.0.0/24")
            .create();
    // VM whose OS disk, data disks, and primary NIC are all set to DELETE with the VM.
    VirtualMachine vm1 =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/)
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withNewDataDisk(computeManager.disks()
                .define("datadisk2")
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withData()
                .withSizeInGB(10))
            .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
    Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions());
Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions());
    Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions());
    Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());
    // Deleting the VM must cascade to disks and NIC; only network + public IP remain.
    computeManager.virtualMachines().deleteById(vm1.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    Assertions.assertEquals(2,
        computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
    PublicIpAddress publicIpAddress =
        computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
    computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());
    // Second VM: primary and secondary NICs (and OS disk) all flagged DELETE.
    String secondaryNicName = generateRandomResourceName("nic", 10);
    Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable =
        this
            .networkManager
            .networkInterfaces()
            .define(secondaryNicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic();
    VirtualMachine vm2 =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
    // After deletion only the virtual network itself should remain.
    computeManager.virtualMachines().deleteById(vm2.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    Assertions.assertEquals(1,
        computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
    secondaryNetworkInterfaceCreatable =
this
            .networkManager
            .networkInterfaces()
            .define(secondaryNicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic();
    // Third VM: no delete options specified -> everything defaults to DETACH.
    VirtualMachine vm3 =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withNewDataDisk(computeManager.disks()
                .define("datadisk2")
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withData()
                .withSizeInGB(10))
            .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
    Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions());
    Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions());
    Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions());
    // With DETACH, deleting the VM leaves all 3 disks and both NICs behind.
    computeManager.virtualMachines().deleteById(vm3.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
    Assertions.assertEquals(2,
        computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
}

@Test
public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
    // NOTE: local 'region' intentionally shadows the class field for this test.
    Region region = Region.US_WEST2;
    Network network =
        this
            .networkManager
            .networks()
            .define("network1")
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/24")
            .withSubnet("default", "10.0.0.0/24")
            .create();
    // First VM: default data-disk delete option DELETE applies to disks added at create time.
    VirtualMachine vm1 =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
    // A disk added via update does NOT inherit the create-time default -> it survives deletion.
    vm1.update()
        .withNewDataDisk(10)
        .apply();
    computeManager.virtualMachines().deleteById(vm1.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count());
    Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get();
    computeManager.disks().deleteById(disk.id());
    // Second VM: no default at create; both create-time and update-time data disks DETACH.
    VirtualMachine vm2 =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
    vm2.update()
        .withNewDataDisk(10)
        .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
        .apply();
    computeManager.virtualMachines().deleteById(vm2.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count());
}

@Test
public void canHibernateVirtualMachine() {
    VirtualMachine vm =
        computeManager.virtualMachines()
            .define(vmName)
            .withRegion("eastus2euap")
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3) .enableHibernation() .create(); Assertions.assertTrue(vm.isHibernationEnabled()); vm.deallocate(true); InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream() .filter(status -> "HibernationState/Hibernated".equals(status.code())) .findFirst().orElse(null); Assertions.assertNotNull(hibernationStatus); vm.start(); vm.deallocate(); vm.update() .disableHibernation() .apply(); Assertions.assertFalse(vm.isHibernationEnabled()); } @Test public void canOperateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.redeploy(); vm.powerOff(true); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.STOPPED, vm.powerState()); vm.start(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.restart(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.deallocate(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState()); } @Test private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() 
.define(generateRandomResourceName("stg", 20))
            .withRegion(region)
            .withNewResourceGroup(resourceGroupCreatable);
    List<String> networkCreatableKeys = new ArrayList<>();
    List<String> publicIpCreatableKeys = new ArrayList<>();
    List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>();
    for (int i = 0; i < vmCount; i++) {
        // Per-VM network creatable; its key is recorded so the test can look it up later.
        Creatable<Network> networkCreatable =
            networkManager
                .networks()
                .define(String.format("%s-%d", networkNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable)
                .withAddressSpace("10.0.0.0/28");
        networkCreatableKeys.add(networkCreatable.key());
        // Per-VM public IP creatable.
        Creatable<PublicIpAddress> publicIPAddressCreatable =
            networkManager
                .publicIpAddresses()
                .define(String.format("%s-%d", publicIpNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable);
        publicIpCreatableKeys.add(publicIPAddressCreatable.key());
        // The VM creatable wires in the per-VM network/IP plus the shared storage account.
        Creatable<VirtualMachine> virtualMachineCreatable =
            computeManager
                .virtualMachines()
                .define(String.format("%s-%d", vmNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable)
                .withNewPrimaryNetwork(networkCreatable)
                .withPrimaryPrivateIPAddressDynamic()
                .withNewPrimaryPublicIPAddress(publicIPAddressCreatable)
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("tirekicker")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .withNewStorageAccount(storageAccountCreatable);
        virtualMachineCreatables.add(virtualMachineCreatable);
    }
    CreatablesInfo creatablesInfo = new CreatablesInfo();
    creatablesInfo.virtualMachineCreatables = virtualMachineCreatables;
    creatablesInfo.networkCreatableKeys = networkCreatableKeys;
    creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys;
    return creatablesInfo;
}

// Simple holder pairing VM creatables with the keys of their related network/IP creatables.
class CreatablesInfo {
    private List<Creatable<VirtualMachine>> virtualMachineCreatables;
    List<String> networkCreatableKeys;
    List<String> publicIpCreatableKeys;
}
}
/** Live tests for virtual-machine CRUD and lifecycle operations. */
class VirtualMachineOperationsTests extends ComputeManagementTest {
    private String rgName = "";
    private String rgName2 = "";
    private final Region region = Region.US_EAST;
    private final Region regionProxPlacementGroup = Region.US_WEST;
    private final Region regionProxPlacementGroup2 = Region.US_EAST;
    private final String vmName = "javavm";
    private final String proxGroupName = "testproxgroup1";
    private final String proxGroupName2 = "testproxgroup2";
    private final String availabilitySetName = "availset1";
    private final String availabilitySetName2 = "availset2";
    private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD;

    @Override
    protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
        rgName = generateRandomResourceName("javacsmrg", 15);
        rgName2 = generateRandomResourceName("javacsmrg2", 15);
        super.initializeClients(httpPipeline, profile);
    }

    @Override
    protected void cleanUpResources() {
        if (rgName != null) {
            resourceManager.resourceGroups().beginDeleteByName(rgName);
        }
        // FIX: rgName2 is generated in initializeClients and used by the proximity-placement-group
        // test, but was never cleaned up, leaking the resource group after every run. Deletion is
        // best-effort because only some tests actually create rgName2.
        if (rgName2 != null) {
            try {
                resourceManager.resourceGroups().beginDeleteByName(rgName2);
            } catch (Exception ignored) {
                // rgName2 may not exist for tests that never created it
            }
        }
    }

    /** Creates a VM on a new network whose subnet carries an NSG, then validates the wiring. */
    @Test
    public void canCreateVirtualMachineWithNetworking() throws Exception {
        NetworkSecurityGroup nsg =
            this
                .networkManager
                .networkSecurityGroups()
                .define("nsg")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .defineRule("rule1")
                .allowInbound()
                .fromAnyAddress()
                .fromPort(80)
                .toAnyAddress()
                .toPort(80)
                .withProtocol(SecurityRuleProtocol.TCP)
                .attach()
                .create();

        Creatable<Network> networkDefinition =
            this
                .networkManager
                .networks()
                .define("network1")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withAddressSpace("10.0.0.0/28")
                .defineSubnet("subnet1")
                .withAddressPrefix("10.0.0.0/29")
                .withExistingNetworkSecurityGroup(nsg)
                .attach();

        // Create
        VirtualMachine vm =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork(networkDefinition)
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("Foo12")
                .withSsh(sshPublicKey())
                .create();

        NetworkInterface primaryNic = vm.getPrimaryNetworkInterface();
        Assertions.assertNotNull(primaryNic);
        NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration();
        Assertions.assertNotNull(primaryIpConfig);

        // The primary IP configuration must reference the network and subnet created above.
        Assertions.assertNotNull(primaryIpConfig.networkId());
        Network network = primaryIpConfig.getNetwork();
        Assertions.assertNotNull(primaryIpConfig.subnetName());
        Subnet subnet = network.subnets().get(primaryIpConfig.subnetName());
        Assertions.assertNotNull(subnet);

        // The NSG must be reachable both through the subnet and through the IP configuration.
        nsg = subnet.getNetworkSecurityGroup();
        Assertions.assertNotNull(nsg);
        Assertions.assertEquals("nsg", nsg.name());
        Assertions.assertEquals(1, nsg.securityRules().size());
        nsg = primaryIpConfig.getNetworkSecurityGroup();
        Assertions.assertEquals("nsg", nsg.name());
    }

    /** Creates a Windows VM with unmanaged disks and a license type, then lists/gets/deletes it. */
    @Test
    public void canCreateVirtualMachine() throws Exception {
        // Create
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLicenseType("Windows_Server")
            .create();

        // List
        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
        // Get
        foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", 
foundVM.licenseType());
        Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice());
        Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy());

        // Max price cannot be changed while the VM is running; the service must reject it.
        try {
            foundVM.update().withMaxPrice(1500.0).apply();
            Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice());
            Assertions.fail();
        } catch (ManagementException e) {
            // expected: updating max price on a running VM is rejected
        }
        // After deallocating, the max price can be updated.
        foundVM.deallocate();
        foundVM.update().withMaxPrice(2000.0).apply();
        foundVM.start();
        Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice());

        // Priority can move between SPOT and LOW, but not back to REGULAR.
        foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply();
        Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority());
        foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply();
        Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority());
        try {
            foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply();
            Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority());
            Assertions.fail();
        } catch (ManagementException e) {
            // expected: changing a spot/low-priority VM back to REGULAR is rejected
        }

        // Delete VM
        computeManager.virtualMachines().deleteById(foundVM.id());
    }

    // Verifies that the proximity placement group of a RUNNING VM cannot be changed.
    @Test
    public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception {
        // First availability set with a new proximity placement group.
        AvailabilitySet setCreated =
            computeManager
                .availabilitySets()
                .define(availabilitySetName)
                .withRegion(regionProxPlacementGroup)
                .withNewResourceGroup(rgName)
                .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
                .create();

        Assertions.assertEquals(availabilitySetName, setCreated.name());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());

        // Second availability set + placement group in a different region/resource group.
        AvailabilitySet setCreated2 =
            computeManager
                .availabilitySets()
                .define(availabilitySetName2)
                .withRegion(regionProxPlacementGroup2)
                .withNewResourceGroup(rgName2)
                .withNewProximityPlacementGroup(proxGroupName2, proxGroupType)
                .create();

        Assertions.assertEquals(availabilitySetName2, setCreated2.name());
        Assertions.assertNotNull(setCreated2.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location());

        // Create a VM pinned to the first proximity placement group.
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(regionProxPlacementGroup)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLicenseType("Windows_Server")
            .create();

        // List
        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
        // Get
        foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
        Assertions.assertEquals("Windows_Server", foundVM.licenseType());

        // Fetch instance view
        PowerState powerState = foundVM.powerState();
        Assertions.assertEquals(powerState, PowerState.RUNNING);

        VirtualMachineInstanceView instanceView = foundVM.instanceView();
        Assertions.assertNotNull(instanceView);
        Assertions.assertNotNull(instanceView.statuses().size() > 0);

        // The placement group must now reference both the availability set and the VM.
        Assertions.assertNotNull(foundVM.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
        Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
        Assertions
            .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));

        // Moving the running VM to another placement group must be rejected by the service.
        try {
            VirtualMachine updatedVm =
                foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply();
        } catch (ManagementException clEx) {
            Assertions
                .assertTrue(
                    clEx
                        .getMessage()
                        .contains(
                            "Updating proximity placement group of VM javavm is not allowed while the VM is running."
                                + " Please stop/deallocate the VM and retry the operation."));
        }

        // Delete VM and availability set.
        computeManager.virtualMachines().deleteById(foundVM.id());
        computeManager.availabilitySets().deleteById(setCreated.id());
    }

    // Verifies a VM and an availability set can share one proximity placement group, and that the
    // association is still reported after update().withoutProximityPlacementGroup().
    @Test
    public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception {
        AvailabilitySet setCreated =
            computeManager
                .availabilitySets()
                .define(availabilitySetName)
                .withRegion(regionProxPlacementGroup)
                .withNewResourceGroup(rgName)
                .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
                .create();

        Assertions.assertEquals(availabilitySetName, setCreated.name());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());

        // Create a VM in the same proximity placement group as the availability set.
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(regionProxPlacementGroup)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLicenseType("Windows_Server")
            .create();

        // List
        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
        // Get
        foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
        Assertions.assertEquals("Windows_Server", foundVM.licenseType());

        // Fetch instance view
        PowerState powerState = foundVM.powerState();
        Assertions.assertEquals(powerState, PowerState.RUNNING);

        VirtualMachineInstanceView instanceView = foundVM.instanceView();
        Assertions.assertNotNull(instanceView);
        Assertions.assertNotNull(instanceView.statuses().size() > 0);

        // The placement group must reference both the availability set and the VM.
        Assertions.assertNotNull(foundVM.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
        Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
        Assertions
            .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));

        VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply();

        // NOTE(review): the placement-group association is asserted to still be present after the
        // update — the removal appears to be a no-op server-side here.
        Assertions.assertNotNull(updatedVm.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0)));
computeManager.virtualMachines().deleteById(foundVM.id());
        computeManager.availabilitySets().deleteById(setCreated.id());
    }

    // Creates several VMs (each with its own network and public IP) in one batched call and
    // verifies every VM and related resource was created.
    @Test
    public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception {
        String vmNamePrefix = "vmz";
        String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
        String networkNamePrefix = generateRandomResourceName("vnet-", 15);
        int count = 5;

        CreatablesInfo creatablesInfo =
            prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
        List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
        List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys;
        List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys;

        // Batched create of all VMs (and their related resources) at once.
        CreatedResources<VirtualMachine> createdVirtualMachines =
            computeManager.virtualMachines().create(virtualMachineCreatables);
        Assertions.assertTrue(createdVirtualMachines.size() == count);

        Set<String> virtualMachineNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
        }
        for (VirtualMachine virtualMachine : createdVirtualMachines.values()) {
            Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
            Assertions.assertNotNull(virtualMachine.id());
        }

        // Related networks are retrievable through the creatable keys captured earlier.
        Set<String> networkNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            networkNames.add(String.format("%s-%d", networkNamePrefix, i));
        }
        for (String networkCreatableKey : networkCreatableKeys) {
            Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey);
            Assertions.assertNotNull(createdNetwork);
            Assertions.assertTrue(networkNames.contains(createdNetwork.name()));
        }

        // Same for the public IP addresses.
        Set<String> publicIPAddressNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
        }
        for (String publicIpCreatableKey : publicIpCreatableKeys) {
            PublicIpAddress createdPublicIpAddress =
                (PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey);
            Assertions.assertNotNull(createdPublicIpAddress);
            Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name()));
        }
    }

    // Streams the async batched creation and checks each emitted resource as it arrives.
    @Test
    public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception {
        String vmNamePrefix = "vmz";
        String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
        String networkNamePrefix = generateRandomResourceName("vnet-", 15);
        int count = 5;

        final Set<String> virtualMachineNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
        }

        final Set<String> networkNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            networkNames.add(String.format("%s-%d", networkNamePrefix, i));
        }

        final Set<String> publicIPAddressNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
        }

        final CreatablesInfo creatablesInfo =
            prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
        final AtomicInteger resourceCount = new AtomicInteger(0);
        List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
        computeManager
            .virtualMachines()
            .createAsync(virtualMachineCreatables)
            .map(
                createdResource -> {
                    // Each emission may be a VM or one of its related resources.
                    if (createdResource instanceof Resource) {
                        Resource resource = (Resource) createdResource;
                        System.out.println("Created: " + resource.id());
                        if (resource instanceof VirtualMachine) {
                            VirtualMachine virtualMachine = (VirtualMachine) resource;
                            Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
                            Assertions.assertNotNull(virtualMachine.id());
                        } else if (resource instanceof Network) {
                            Network network = (Network) resource;
                            Assertions.assertTrue(networkNames.contains(network.name()));
                            Assertions.assertNotNull(network.id());
                        } else if (resource instanceof PublicIpAddress) {
                            PublicIpAddress publicIPAddress = (PublicIpAddress) resource;
                            Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name()));
                            Assertions.assertNotNull(publicIPAddress.id());
                        }
                    }
                    resourceCount.incrementAndGet();
                    return createdResource;
                })
            .blockLast();

        networkNames.forEach(name -> {
            Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name));
        });

        publicIPAddressNames.forEach(name -> {
            Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name));
        });

        // One shared storage account, one NIC per VM, and one stream emission per VM.
        Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(count, resourceCount.get());
    }

    // Verifies unmanaged data disks can be stored in a caller-chosen premium storage account, and
    // that existing VHDs can be re-attached to a new VM at create and at update time.
    @Test
    public void canSetStorageAccountForUnmanagedDisk() {
        final String storageName = generateRandomResourceName("st", 14);
        // Create a premium storage account to hold the unmanaged VHDs.
        StorageAccount storageAccount =
            storageManager
                .storageAccounts()
                .define(storageName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withSku(StorageAccountSkuType.PREMIUM_LRS)
                .create();

        // Create a VM with two new unmanaged data disks stored at explicit container/VHD names.
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("Foo12")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .defineUnmanagedDataDisk("disk1")
                    .withNewVhd(100)
                    .withLun(2)
                    .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
                    .attach()
                .defineUnmanagedDataDisk("disk2")
                    .withNewVhd(100)
                    .withLun(3)
                    .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
                    .attach()
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
                .withOSDiskCaching(CachingTypes.READ_WRITE)
                .create();

        // The disks are keyed by LUN (2 and 3 above).
        Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
        Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(2, unmanagedDataDisks.size());
        VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2);
        Assertions.assertNotNull(firstUnmanagedDataDisk);
        VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3);
        Assertions.assertNotNull(secondUnmanagedDataDisk);
        String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri();
        String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri();
        Assertions.assertNotNull(createdVhdUri1);
        Assertions.assertNotNull(createdVhdUri2);

        computeManager.virtualMachines().deleteById(virtualMachine.id());

        // The VHDs survive VM deletion; attach the first one to a brand-new VM.
        virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("Foo12")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
                .create();

        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
        Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(1, unmanagedDataDisks.size());
        firstUnmanagedDataDisk = null;
        for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) {
            firstUnmanagedDataDisk = unmanagedDisk;
            break;
        }
        // The re-attached disk must point at the VHD created for the first VM.
        Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri());
        Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1));

        // Attach the second existing VHD via update.
        virtualMachine
            .update()
            .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
            .apply();

        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(2, unmanagedDataDisks.size());
    }

    // Verifies tags can be set individually and replaced wholesale via update.
    @Test
    public void canUpdateTagsOnVM() {
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .create();

        // add a single tag
        virtualMachine.update().withTag("test", "testValue").apply();
        Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test"));

        // replace all tags
        Map<String, String> testTags = new HashMap<String, String>();
        testTags.put("testTag", "testValue");
        virtualMachine.update().withTags(testTags).apply();
        Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag"));
    }

    // Runs a shell script on a Linux VM via the run-command API.
    @Test
    public void canRunScriptOnVM() {
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .create();

        List<String> installGit = new ArrayList<>();
        installGit.add("sudo apt-get update");
        installGit.add("sudo apt-get install -y git");

        RunCommandResult runResult =
            virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>());
        Assertions.assertNotNull(runResult);
        Assertions.assertNotNull(runResult.value());
        Assertions.assertTrue(runResult.value().size() > 0);
    }

    // Simulates eviction of a spot VM and verifies it ends up deallocated with its OS disk
    // reserved. Not recorded because it polls real wall-clock time (up to ~30 minutes).
    @Test
    @DoNotRecord(skipInPlayback = true)
    public void canPerformSimulateEvictionOnSpotVirtualMachine() {
        VirtualMachine virtualMachine =
            computeManager.virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .create();

        // Before eviction: OS disk present and attached.
        Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType());
        Assertions.assertTrue(virtualMachine.osDiskSize() > 0);
        Disk disk = computeManager.disks().getById(virtualMachine.osDiskId());
        Assertions.assertNotNull(disk);
        Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState());

        // Simulate eviction, then poll (up to 30 minutes) for the VM to reach DEALLOCATED.
        virtualMachine.simulateEviction();
        boolean deallocated = false;
        int pollIntervalInMinutes = 5;
        for (int i = 0; i < 30; i += pollIntervalInMinutes) {
            ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes));

            virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
            if (virtualMachine.powerState() == PowerState.DEALLOCATED) {
                deallocated = true;
                break;
            }
        }
        Assertions.assertTrue(deallocated);

        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        Assertions.assertNotNull(virtualMachine);
        // After eviction the VM reports no OS disk storage type or size, and the disk is RESERVED.
        Assertions.assertNull(virtualMachine.osDiskStorageAccountType());
        Assertions.assertEquals(0, virtualMachine.osDiskSize());
        disk = computeManager.disks().getById(virtualMachine.osDiskId());
        Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState());
    }

    // Force-deletes a VM and verifies the VM is gone while its NIC is left behind.
    @Test
    public void canForceDeleteVirtualMachine() {
        // Create
        computeManager.virtualMachines()
            .define(vmName)
            .withRegion("eastus2euap")
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .create();
        // Get
        VirtualMachine virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(virtualMachine);
        Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region());
        String nicId = virtualMachine.primaryNetworkInterfaceId();

        // Force delete the VM.
        computeManager.virtualMachines().deleteById(virtualMachine.id(), true);
        try {
            virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        } catch (ManagementException ex) {
            virtualMachine = null;
            Assertions.assertEquals(404, ex.getResponse().getStatusCode());
        }
        Assertions.assertNull(virtualMachine);

        // The NIC is not deleted by a force delete of the VM.
        NetworkInterface nic = networkManager.networkInterfaces().getById(nicId);
        Assertions.assertNotNull(nic);
    }

    // Verifies DeleteOptions on OS disk, data disks, NICs and public IP: DELETE-marked resources
    // disappear with the VM, while DETACH (the default) resources survive it.
    @Test
    public void canCreateVirtualMachineWithDeleteOption() throws Exception {
        Region region = Region.US_WEST2;
        final String publicIpDnsLabel = generateRandomResourceName("pip", 20);

        Network network = this
            .networkManager
            .networks()
            .define("network1")
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/24")
            .withSubnet("default", "10.0.0.0/24")
            .create();

        // vm1: everything except network and public IP marked for DELETE.
        VirtualMachine vm1 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic()
                .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/)
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("testuser")
                .withSsh(sshPublicKey())
                .withNewDataDisk(10)
                .withNewDataDisk(computeManager.disks()
                    .define("datadisk2")
                    .withRegion(region)
                    .withExistingResourceGroup(rgName)
                    .withData()
                    .withSizeInGB(10))
                .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
                .withOSDiskDeleteOptions(DeleteOptions.DELETE)
                .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
                .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
                .create();

        Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions());

        Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());

        computeManager.virtualMachines().deleteById(vm1.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));

        // Only the network and the (detached) public IP remain in the resource group.
        Assertions.assertEquals(2,
            computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
        PublicIpAddress publicIpAddress =
            computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
        computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());

        String secondaryNicName = generateRandomResourceName("nic", 10);
        Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this
            .networkManager
            .networkInterfaces()
            .define(secondaryNicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic();

        // vm2: no public IP; both NICs and the OS disk marked for DELETE.
        VirtualMachine vm2 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("testuser")
                .withSsh(sshPublicKey())
                .withOSDiskDeleteOptions(DeleteOptions.DELETE)
                .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
                .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
                .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
                .create();

        computeManager.virtualMachines().deleteById(vm2.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));

        // Everything marked DELETE is gone; only the network remains.
        Assertions.assertEquals(1,
            computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());

        secondaryNetworkInterfaceCreatable = this
            .networkManager
            .networkInterfaces()
            .define(secondaryNicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic();

        // vm3: no explicit delete options — everything defaults to DETACH.
        VirtualMachine vm3 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("testuser")
                .withSsh(sshPublicKey())
                .withNewDataDisk(10)
                .withNewDataDisk(computeManager.disks()
                    .define("datadisk2")
                    .withRegion(region)
                    .withExistingResourceGroup(rgName)
                    .withData()
                    .withSizeInGB(10))
                .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
                .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
                .create();

        Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions());

        computeManager.virtualMachines().deleteById(vm3.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));

        // Detached disks and NICs survive the VM deletion.
        Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(2,
            computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
    }

    @Test
    public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
        Region region = Region.US_WEST2;

        Network network = this
            .networkManager
            .networks()
            .define("network1")
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/24")
            .withSubnet("default", "10.0.0.0/24")
            .create();

        VirtualMachine vm1 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3) .enableHibernation() .create(); Assertions.assertTrue(vm.isHibernationEnabled()); vm.deallocate(true); InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream() .filter(status -> "HibernationState/Hibernated".equals(status.code())) .findFirst().orElse(null); Assertions.assertNotNull(hibernationStatus); vm.start(); vm.deallocate(); vm.update() .disableHibernation() .apply(); Assertions.assertFalse(vm.isHibernationEnabled()); } @Test public void canOperateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.redeploy(); vm.powerOff(true); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.STOPPED, vm.powerState()); vm.start(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.restart(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.deallocate(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState()); } @Test private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() 
.define(generateRandomResourceName("stg", 20)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
Just keep same style is OK. There are 2 options: 1. option 1: change all log to this format: wellKnownCertificates, customCertificates, keyVaultCertificates, classpathCertificates 2. option 2: keep current implementation. Either is OK.
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well known certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded Key Vault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
LOGGER.log(FINE, String.format("Loaded well known certificates: %s.", wellKnowCertificates.getAliases()));
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well known certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded Key Vault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. 
* </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return 
getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) { LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] 
chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. 
* </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return 
getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) { LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] 
chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
`put` is deprecated, use `set` instead
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( requestHeaders.getValue(CryptographyConstants.RANGE_HEADER)); if (requestHeaders.getValue(CryptographyConstants.RANGE_HEADER) != null) { requestHeaders.set(CryptographyConstants.RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * We will need to know the total size of the data to know when to finalize the decryption. If it was * not set originally with the intent of downloading the whole blob, update it here. */ encryptedRange.setAdjustedDownloadCount(Long.parseLong(responseHeaders.getValue( CryptographyConstants.CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size */ boolean padding = encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE); String encryptedDataString = responseHeaders .getValue(Constants.HeaderConstants.X_MS_META + "-" + CryptographyConstants.ENCRYPTION_DATA_KEY); Flux<ByteBuffer> plainTextData = this.decryptBlob(encryptedDataString, httpResponse.getBody(), encryptedRange, padding); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); }
requestHeaders.set(CryptographyConstants.RANGE_HEADER, encryptedRange.toBlobRange().toString());
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( requestHeaders.getValue(CryptographyConstants.RANGE_HEADER)); if (requestHeaders.getValue(CryptographyConstants.RANGE_HEADER) != null) { requestHeaders.set(CryptographyConstants.RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * We will need to know the total size of the data to know when to finalize the decryption. If it was * not set originally with the intent of downloading the whole blob, update it here. */ encryptedRange.setAdjustedDownloadCount(Long.parseLong(responseHeaders.getValue( CryptographyConstants.CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size */ boolean padding = encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE); String encryptedDataString = responseHeaders .getValue(Constants.HeaderConstants.X_MS_META + "-" + CryptographyConstants.ENCRYPTION_DATA_KEY); Flux<ByteBuffer> plainTextData = this.decryptBlob(encryptedDataString, httpResponse.getBody(), encryptedRange, padding); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); }
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
why did we remove catch blocks somewhere and added them in couple of places?
public Mono<AppendBlobItem> create() { return create(false); }
}
public Mono<AppendBlobItem> create() { return create(false); }
class AppendBlobAsyncClient extends BlobAsyncClientBase { private static final ClientLogger LOGGER = new ClientLogger(AppendBlobAsyncClient.class); /** * Indicates the maximum number of bytes that can be sent in a call to appendBlock. */ public static final int MAX_APPEND_BLOCK_BYTES = 4 * Constants.MB; /** * Indicates the maximum number of blocks allowed in an append blob. */ public static final int MAX_BLOCKS = 50000; /** * Package-private constructor for use by {@link SpecializedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. */ AppendBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, EncryptionScope encryptionScope, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, versionId); } /** * Creates a new {@link AppendBlobAsyncClient} with the specified {@code encryptionScope}. * * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope. 
* @return a {@link AppendBlobAsyncClient} with the specified {@code encryptionScope}. */ @Override public AppendBlobAsyncClient getEncryptionScopeAsyncClient(String encryptionScope) { EncryptionScope finalEncryptionScope = null; if (encryptionScope != null) { finalEncryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return new AppendBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), finalEncryptionScope, getVersionId()); } /** * Creates a new {@link AppendBlobAsyncClient} with the specified {@code customerProvidedKey}. * * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob, * pass {@code null} to use no customer provided key. * @return a {@link AppendBlobAsyncClient} with the specified {@code customerProvidedKey}. */ @Override public AppendBlobAsyncClient getCustomerProvidedKeyAsyncClient(CustomerProvidedKey customerProvidedKey) { CpkInfo finalCustomerProvidedKey = null; if (customerProvidedKey != null) { finalCustomerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return new AppendBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), finalCustomerProvidedKey, encryptionScope, getVersionId()); } /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. By default this method will * not overwrite an existing blob. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.create --> * <pre> * client.create& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.create --> * * @return A {@link Mono} containing the information of the created appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.create * <pre> * boolean overwrite = false; & * client.create& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.create * * @param overwrite Whether to overwrite, should data exist on the blob. * * @return A {@link Mono} containing the information of the created appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AppendBlobItem> create(boolean overwrite) { BlobRequestConditions blobRequestConditions = new BlobRequestConditions(); if (!overwrite) { blobRequestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return createWithResponse(null, null, blobRequestConditions).flatMap(FluxUtil::toMono); } /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. 
* <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentType& * .setContentLanguage& * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.createWithResponse& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param requestConditions {@link BlobRequestConditions} * @return A {@link Mono} containing {@link Response} whose {@link Response * appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> createWithResponse(BlobHttpHeaders headers, Map<String, String> metadata, BlobRequestConditions requestConditions) { return this.createWithResponse(new AppendBlobCreateOptions().setHeaders(headers).setMetadata(metadata) .setRequestConditions(requestConditions)); } /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. 
* <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentType& * .setContentLanguage& * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.createWithResponse& * .setTags& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * * @param options {@link AppendBlobCreateOptions} * @return A {@link Mono} containing {@link Response} whose {@link Response * appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> createWithResponse(AppendBlobCreateOptions options) { try { return withContext(context -> createWithResponse(options, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<AppendBlobItem>> createWithResponse(AppendBlobCreateOptions options, Context context) { options = (options == null) ? new AppendBlobCreateOptions() : options; BlobRequestConditions requestConditions = options.getRequestConditions(); requestConditions = (requestConditions == null) ? new BlobRequestConditions() : requestConditions; context = context == null ? Context.NONE : context; BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null ? 
new BlobImmutabilityPolicy() : options.getImmutabilityPolicy(); return this.azureBlobStorage.getAppendBlobs().createWithResponseAsync(containerName, blobName, 0, null, options.getMetadata(), requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(), requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(), requestConditions.getIfNoneMatch(), requestConditions.getTagsConditions(), null, tagsToString(options.getTags()), immutabilityPolicy.getExpiryTime(), immutabilityPolicy.getPolicyMode(), options.hasLegalHold(), options.getHeaders(), getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(rb -> { AppendBlobsCreateHeaders hd = rb.getDeserializedHeaders(); AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), null, null, hd.getXMsVersionId()); return new SimpleResponse<>(rb, item); }); } /** * Commits a new block of data to the end of the existing append blob. * <p> * Note that the data passed must be replayable if retries are enabled (the default). In other words, the * {@code Flux} must produce the same data each time it is subscribed to. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlock * <pre> * client.appendBlock& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlock * * @param data The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled * (the default). In other words, the Flux must produce the same data each time it is subscribed to. * @param length The exact length of the data. It is important that this value match precisely the length of the * data emitted by the {@code Flux}. 
* @return {@link Mono} containing the information of the append blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AppendBlobItem> appendBlock(Flux<ByteBuffer> data, long length) { return appendBlockWithResponse(data, length, null, null).flatMap(FluxUtil::toMono); } /** * Commits a new block of data to the end of the existing append blob. * <p> * Note that the data passed must be replayable if retries are enabled (the default). In other words, the * {@code Flux} must produce the same data each time it is subscribed to. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockWithResponse * <pre> * byte[] md5 = MessageDigest.getInstance& * AppendBlobRequestConditions requestConditions = new AppendBlobRequestConditions& * .setAppendPosition& * .setMaxSize& * * client.appendBlockWithResponse& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockWithResponse * * @param data The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled * (the default). In other words, the Flux must produce the same data each time it is subscribed to. * @param length The exact length of the data. It is important that this value match precisely the length of the * data emitted by the {@code Flux}. * @param contentMd5 An MD5 hash of the block content. This hash is used to verify the integrity of the block during * transport. When this header is specified, the storage service compares the hash of the content that has arrived * with this header value. Note that this MD5 hash is not stored with the blob. If the two hashes do not match, the * operation will fail. * @param appendBlobRequestConditions {@link AppendBlobRequestConditions} * @return A {@link Mono} containing {@link Response} whose {@link Response * blob operation. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> appendBlockWithResponse(Flux<ByteBuffer> data, long length, byte[] contentMd5, AppendBlobRequestConditions appendBlobRequestConditions) { try { return withContext(context -> appendBlockWithResponse(data, length, contentMd5, appendBlobRequestConditions, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<AppendBlobItem>> appendBlockWithResponse(Flux<ByteBuffer> data, long length, byte[] contentMd5, AppendBlobRequestConditions appendBlobRequestConditions, Context context) { appendBlobRequestConditions = appendBlobRequestConditions == null ? new AppendBlobRequestConditions() : appendBlobRequestConditions; context = context == null ? Context.NONE : context; return this.azureBlobStorage.getAppendBlobs().appendBlockWithResponseAsync( containerName, blobName, length, data, null, contentMd5, null, appendBlobRequestConditions.getLeaseId(), appendBlobRequestConditions.getMaxSize(), appendBlobRequestConditions.getAppendPosition(), appendBlobRequestConditions.getIfModifiedSince(), appendBlobRequestConditions.getIfUnmodifiedSince(), appendBlobRequestConditions.getIfMatch(), appendBlobRequestConditions.getIfNoneMatch(), appendBlobRequestConditions.getTagsConditions(), null, getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(rb -> { AppendBlobsAppendBlockHeaders hd = rb.getDeserializedHeaders(); AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), hd.getXMsBlobAppendOffset(), hd.getXMsBlobCommittedBlockCount()); return new SimpleResponse<>(rb, item); }); } /** * Commits a new block of data from another blob to the end of this append blob. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrl * <pre> * client.appendBlockFromUrl& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrl * * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage * account can be authenticated via Shared Key. However, if the source is a blob in another account, the source blob * must either be public or must be authenticated via a shared access signature. If the source blob is public, no * authentication is required to perform the operation. * @param sourceRange The source {@link BlobRange} to copy. * @return {@link Mono} containing the information of the append blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AppendBlobItem> appendBlockFromUrl(String sourceUrl, BlobRange sourceRange) { return appendBlockFromUrlWithResponse(sourceUrl, sourceRange, null, null, null).flatMap(FluxUtil::toMono); } /** * Commits a new block of data from another blob to the end of this append blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * <pre> * AppendBlobRequestConditions appendBlobRequestConditions = new AppendBlobRequestConditions& * .setAppendPosition& * .setMaxSize& * * BlobRequestConditions modifiedRequestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.appendBlockFromUrlWithResponse& * appendBlobRequestConditions, modifiedRequestConditions& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage * account can be authenticated via Shared Key. 
However, if the source is a blob in another account, the source blob * must either be public or must be authenticated via a shared access signature. If the source blob is public, no * authentication is required to perform the operation. * @param sourceRange {@link BlobRange} * @param sourceContentMD5 An MD5 hash of the block content from the source blob. If specified, the service will * calculate the MD5 of the received data and fail the request if it does not match the provided MD5. * @param destRequestConditions {@link AppendBlobRequestConditions} * @param sourceRequestConditions {@link BlobRequestConditions} * @return A {@link Mono} containing {@link Response} whose {@link Response * blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(String sourceUrl, BlobRange sourceRange, byte[] sourceContentMD5, AppendBlobRequestConditions destRequestConditions, BlobRequestConditions sourceRequestConditions) { return appendBlockFromUrlWithResponse(new AppendBlobAppendBlockFromUrlOptions(sourceUrl) .setSourceRange(sourceRange).setSourceContentMd5(sourceContentMD5) .setDestinationRequestConditions(destRequestConditions) .setSourceRequestConditions(sourceRequestConditions)); } /** * Commits a new block of data from another blob to the end of this append blob. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * <pre> * AppendBlobRequestConditions appendBlobRequestConditions = new AppendBlobRequestConditions& * .setAppendPosition& * .setMaxSize& * * BlobRequestConditions modifiedRequestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.appendBlockFromUrlWithResponse& * .setSourceRange& * .setDestinationRequestConditions& * .setSourceRequestConditions& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * * @param options Parameters for the operation. * @return A {@link Mono} containing {@link Response} whose {@link Response * blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(AppendBlobAppendBlockFromUrlOptions options) { try { return withContext(context -> appendBlockFromUrlWithResponse(options, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(AppendBlobAppendBlockFromUrlOptions options, Context context) { BlobRange sourceRange = (options.getSourceRange() == null) ? new BlobRange(0) : options.getSourceRange(); AppendBlobRequestConditions destRequestConditions = (options.getDestinationRequestConditions() == null) ? new AppendBlobRequestConditions() : options.getDestinationRequestConditions(); RequestConditions sourceRequestConditions = (options.getSourceRequestConditions() == null) ? new RequestConditions() : options.getSourceRequestConditions(); try { new URL(options.getSourceUrl()); } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url.")); } context = context == null ? Context.NONE : context; String sourceAuth = options.getSourceAuthorization() == null ? 
null : options.getSourceAuthorization().toString(); return this.azureBlobStorage.getAppendBlobs().appendBlockFromUrlWithResponseAsync(containerName, blobName, options.getSourceUrl(), 0, sourceRange.toString(), options.getSourceContentMd5(), null, null, null, destRequestConditions.getLeaseId(), destRequestConditions.getMaxSize(), destRequestConditions.getAppendPosition(), destRequestConditions.getIfModifiedSince(), destRequestConditions.getIfUnmodifiedSince(), destRequestConditions.getIfMatch(), destRequestConditions.getIfNoneMatch(), destRequestConditions.getTagsConditions(), sourceRequestConditions.getIfModifiedSince(), sourceRequestConditions.getIfUnmodifiedSince(), sourceRequestConditions.getIfMatch(), sourceRequestConditions.getIfNoneMatch(), null, sourceAuth, getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(rb -> { AppendBlobsAppendBlockFromUrlHeaders hd = rb.getDeserializedHeaders(); AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), hd.getXMsBlobAppendOffset(), hd.getXMsBlobCommittedBlockCount()); return new SimpleResponse<>(rb, item); }); } /** * Seals an append blob, making it read only. Any subsequent appends will fail. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.seal --> * <pre> * client.seal& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.seal --> * * @return A reactive response signalling completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> seal() { return sealWithResponse(new AppendBlobSealOptions()).flatMap(FluxUtil::toMono); } /** * Seals an append blob, making it read only. Any subsequent appends will fail. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.sealWithResponse * <pre> * AppendBlobRequestConditions requestConditions = new AppendBlobRequestConditions& * .setIfUnmodifiedSince& * * client.sealWithResponse& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.sealWithResponse * * @param options {@link AppendBlobSealOptions} * @return A reactive response signalling completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sealWithResponse(AppendBlobSealOptions options) { try { return withContext(context -> sealWithResponse(options, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<Void>> sealWithResponse(AppendBlobSealOptions options, Context context) { options = (options == null) ? new AppendBlobSealOptions() : options; AppendBlobRequestConditions requestConditions = options.getRequestConditions(); requestConditions = (requestConditions == null) ? new AppendBlobRequestConditions() : requestConditions; context = context == null ? Context.NONE : context; return this.azureBlobStorage.getAppendBlobs().sealWithResponseAsync(containerName, blobName, null, null, requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(), requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(), requestConditions.getIfNoneMatch(), requestConditions.getAppendPosition(), context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(response -> new SimpleResponse<>(response, null)); } }
class AppendBlobAsyncClient extends BlobAsyncClientBase { private static final ClientLogger LOGGER = new ClientLogger(AppendBlobAsyncClient.class); /** * Indicates the maximum number of bytes that can be sent in a call to appendBlock. */ public static final int MAX_APPEND_BLOCK_BYTES = 4 * Constants.MB; /** * Indicates the maximum number of blocks allowed in an append blob. */ public static final int MAX_BLOCKS = 50000; /** * Package-private constructor for use by {@link SpecializedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. */ AppendBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, EncryptionScope encryptionScope, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, versionId); } /** * Creates a new {@link AppendBlobAsyncClient} with the specified {@code encryptionScope}. * * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope. 
* @return a {@link AppendBlobAsyncClient} with the specified {@code encryptionScope}. */ @Override public AppendBlobAsyncClient getEncryptionScopeAsyncClient(String encryptionScope) { EncryptionScope finalEncryptionScope = null; if (encryptionScope != null) { finalEncryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return new AppendBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), finalEncryptionScope, getVersionId()); } /** * Creates a new {@link AppendBlobAsyncClient} with the specified {@code customerProvidedKey}. * * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob, * pass {@code null} to use no customer provided key. * @return a {@link AppendBlobAsyncClient} with the specified {@code customerProvidedKey}. */ @Override public AppendBlobAsyncClient getCustomerProvidedKeyAsyncClient(CustomerProvidedKey customerProvidedKey) { CpkInfo finalCustomerProvidedKey = null; if (customerProvidedKey != null) { finalCustomerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return new AppendBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), finalCustomerProvidedKey, encryptionScope, getVersionId()); } /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. By default this method will * not overwrite an existing blob. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.create --> * <pre> * client.create& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.create --> * * @return A {@link Mono} containing the information of the created appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.create * <pre> * boolean overwrite = false; & * client.create& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.create * * @param overwrite Whether to overwrite, should data exist on the blob. * * @return A {@link Mono} containing the information of the created appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AppendBlobItem> create(boolean overwrite) { BlobRequestConditions blobRequestConditions = new BlobRequestConditions(); if (!overwrite) { blobRequestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return createWithResponse(null, null, blobRequestConditions).flatMap(FluxUtil::toMono); } /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. 
* <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentType& * .setContentLanguage& * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.createWithResponse& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param requestConditions {@link BlobRequestConditions} * @return A {@link Mono} containing {@link Response} whose {@link Response * appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> createWithResponse(BlobHttpHeaders headers, Map<String, String> metadata, BlobRequestConditions requestConditions) { return this.createWithResponse(new AppendBlobCreateOptions().setHeaders(headers).setMetadata(metadata) .setRequestConditions(requestConditions)); } /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. 
 * <p>To avoid overwriting an existing blob, pass "*" to
 * {@link BlobRequestConditions#setIfNoneMatch(String)}.
 *
 * @param options {@link AppendBlobCreateOptions}
 * @return A {@link Mono} containing a {@link Response} whose value contains the created appended blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<AppendBlobItem>> createWithResponse(AppendBlobCreateOptions options) {
    try {
        // Delegate to the Context-accepting overload; withContext supplies the reactor subscriber context.
        return withContext(context -> createWithResponse(options, context));
    } catch (RuntimeException ex) {
        // Synchronous failures (e.g. while building the request) are surfaced through the returned Mono.
        return monoError(LOGGER, ex);
    }
}

// Package-private worker: performs the Create Append Blob REST call with an explicit pipeline Context.
Mono<Response<AppendBlobItem>> createWithResponse(AppendBlobCreateOptions options, Context context) {
    // Null-safe defaults so callers may pass null for options, conditions and context.
    options = (options == null) ? new AppendBlobCreateOptions() : options;
    BlobRequestConditions requestConditions = options.getRequestConditions();
    requestConditions = (requestConditions == null) ? new BlobRequestConditions() : requestConditions;
    context = context == null ? Context.NONE : context;
    BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null
        ? new BlobImmutabilityPolicy() : options.getImmutabilityPolicy();
    return this.azureBlobStorage.getAppendBlobs().createWithResponseAsync(containerName, blobName, 0, null,
        options.getMetadata(), requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(),
        requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(),
        requestConditions.getIfNoneMatch(), requestConditions.getTagsConditions(), null,
        tagsToString(options.getTags()), immutabilityPolicy.getExpiryTime(), immutabilityPolicy.getPolicyMode(),
        options.hasLegalHold(), options.getHeaders(), getCustomerProvidedKey(), encryptionScope,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(rb -> {
            // Project the generated response headers onto the public AppendBlobItem model.
            AppendBlobsCreateHeaders hd = rb.getDeserializedHeaders();
            AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(),
                hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(),
                null, null, hd.getXMsVersionId());
            return new SimpleResponse<>(rb, item);
        });
}

/**
 * Commits a new block of data to the end of the existing append blob.
 * <p>
 * Note that the data passed must be replayable if retries are enabled (the default): the {@code Flux}
 * must produce the same data each time it is subscribed to.
 *
 * @param data The data to write to the blob; must be replayable if retries are enabled.
 * @param length The exact length of the data emitted by the {@code Flux}.
 * @return {@link Mono} containing the information of the append blob operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<AppendBlobItem> appendBlock(Flux<ByteBuffer> data, long length) {
    return appendBlockWithResponse(data, length, null, null).flatMap(FluxUtil::toMono);
}

/**
 * Commits a new block of data to the end of the existing append blob.
 *
 * @param data The data to write to the blob; must be replayable if retries are enabled.
 * @param length The exact length of the data emitted by the {@code Flux}.
 * @param contentMd5 Optional MD5 hash of the block content, used by the service to verify transport
 * integrity; not stored with the blob. A mismatch fails the operation.
 * @param appendBlobRequestConditions {@link AppendBlobRequestConditions}
 * @return A {@link Mono} containing a {@link Response} whose value contains the append blob operation result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<AppendBlobItem>> appendBlockWithResponse(Flux<ByteBuffer> data, long length,
    byte[] contentMd5, AppendBlobRequestConditions appendBlobRequestConditions) {
    try {
        return withContext(context -> appendBlockWithResponse(data, length, contentMd5,
            appendBlobRequestConditions, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}

// Package-private worker for Append Block with an explicit pipeline Context.
Mono<Response<AppendBlobItem>> appendBlockWithResponse(Flux<ByteBuffer> data, long length, byte[] contentMd5,
    AppendBlobRequestConditions appendBlobRequestConditions, Context context) {
    appendBlobRequestConditions = appendBlobRequestConditions == null
        ? new AppendBlobRequestConditions() : appendBlobRequestConditions;
    context = context == null ? Context.NONE : context;

    return this.azureBlobStorage.getAppendBlobs().appendBlockWithResponseAsync(
        containerName, blobName, length, data, null, contentMd5, null, appendBlobRequestConditions.getLeaseId(),
        appendBlobRequestConditions.getMaxSize(), appendBlobRequestConditions.getAppendPosition(),
        appendBlobRequestConditions.getIfModifiedSince(), appendBlobRequestConditions.getIfUnmodifiedSince(),
        appendBlobRequestConditions.getIfMatch(), appendBlobRequestConditions.getIfNoneMatch(),
        appendBlobRequestConditions.getTagsConditions(), null, getCustomerProvidedKey(), encryptionScope,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(rb -> {
            AppendBlobsAppendBlockHeaders hd = rb.getDeserializedHeaders();
            AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(),
                hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(),
                hd.getXMsBlobAppendOffset(), hd.getXMsBlobCommittedBlockCount());
            return new SimpleResponse<>(rb, item);
        });
}

/**
 * Commits a new block of data from another blob to the end of this append blob.
 *
 * @param sourceUrl The url to the source blob. A source in the same storage account can be authenticated
 * via Shared Key; a source in another account must be public or authenticated via SAS.
 * @param sourceRange The source {@link BlobRange} to copy.
 * @return {@link Mono} containing the information of the append blob operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<AppendBlobItem> appendBlockFromUrl(String sourceUrl, BlobRange sourceRange) {
    return appendBlockFromUrlWithResponse(sourceUrl, sourceRange, null, null, null).flatMap(FluxUtil::toMono);
}

/**
 * Commits a new block of data from another blob to the end of this append blob.
 *
 * @param sourceUrl The url to the source blob (public, SAS-authenticated, or same-account Shared Key).
 * @param sourceRange {@link BlobRange}
 * @param sourceContentMD5 Optional MD5 of the source block content; a mismatch fails the request.
 * @param destRequestConditions {@link AppendBlobRequestConditions}
 * @param sourceRequestConditions {@link BlobRequestConditions}
 * @return A {@link Mono} containing a {@link Response} whose value contains the append blob operation result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(String sourceUrl, BlobRange sourceRange,
    byte[] sourceContentMD5, AppendBlobRequestConditions destRequestConditions,
    BlobRequestConditions sourceRequestConditions) {
    // Adapt the legacy parameter list onto the options-bag overload.
    return appendBlockFromUrlWithResponse(new AppendBlobAppendBlockFromUrlOptions(sourceUrl)
        .setSourceRange(sourceRange).setSourceContentMd5(sourceContentMD5)
        .setDestinationRequestConditions(destRequestConditions)
        .setSourceRequestConditions(sourceRequestConditions));
}

/**
 * Commits a new block of data from another blob to the end of this append blob.
 *
 * @param options Parameters for the operation.
 * @return A {@link Mono} containing a {@link Response} whose value contains the append blob operation result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(AppendBlobAppendBlockFromUrlOptions options) {
    try {
        return withContext(context -> appendBlockFromUrlWithResponse(options, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}

// Package-private worker for Append Block From URL with an explicit pipeline Context.
Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(AppendBlobAppendBlockFromUrlOptions options,
    Context context) {
    // Null-safe defaults for range and both sets of access conditions.
    BlobRange sourceRange = (options.getSourceRange() == null) ? new BlobRange(0) : options.getSourceRange();
    AppendBlobRequestConditions destRequestConditions = (options.getDestinationRequestConditions() == null)
        ? new AppendBlobRequestConditions() : options.getDestinationRequestConditions();
    RequestConditions sourceRequestConditions = (options.getSourceRequestConditions() == null)
        ? new RequestConditions() : options.getSourceRequestConditions();

    try {
        // Eagerly validate the source URL so a malformed value fails fast with a clear message.
        new URL(options.getSourceUrl());
    } catch (MalformedURLException ex) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url."));
    }
    context = context == null ? Context.NONE : context;
    String sourceAuth = options.getSourceAuthorization() == null
        ? null : options.getSourceAuthorization().toString();

    return this.azureBlobStorage.getAppendBlobs().appendBlockFromUrlWithResponseAsync(containerName, blobName,
        options.getSourceUrl(), 0, sourceRange.toString(), options.getSourceContentMd5(), null, null, null,
        destRequestConditions.getLeaseId(), destRequestConditions.getMaxSize(),
        destRequestConditions.getAppendPosition(), destRequestConditions.getIfModifiedSince(),
        destRequestConditions.getIfUnmodifiedSince(), destRequestConditions.getIfMatch(),
        destRequestConditions.getIfNoneMatch(), destRequestConditions.getTagsConditions(),
        sourceRequestConditions.getIfModifiedSince(), sourceRequestConditions.getIfUnmodifiedSince(),
        sourceRequestConditions.getIfMatch(), sourceRequestConditions.getIfNoneMatch(), null, sourceAuth,
        getCustomerProvidedKey(), encryptionScope,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(rb -> {
            AppendBlobsAppendBlockFromUrlHeaders hd = rb.getDeserializedHeaders();
            AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(),
                hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(),
                hd.getXMsBlobAppendOffset(), hd.getXMsBlobCommittedBlockCount());
            return new SimpleResponse<>(rb, item);
        });
}

/**
 * Seals an append blob, making it read only. Any subsequent appends will fail.
 *
 * @return A reactive response signalling completion.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> seal() {
    return sealWithResponse(new AppendBlobSealOptions()).flatMap(FluxUtil::toMono);
}

/**
 * Seals an append blob, making it read only. Any subsequent appends will fail.
 *
 * @param options {@link AppendBlobSealOptions}
 * @return A reactive response signalling completion.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> sealWithResponse(AppendBlobSealOptions options) {
    try {
        return withContext(context -> sealWithResponse(options, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}

// Package-private worker for Seal Blob with an explicit pipeline Context.
Mono<Response<Void>> sealWithResponse(AppendBlobSealOptions options, Context context) {
    options = (options == null) ? new AppendBlobSealOptions() : options;
    AppendBlobRequestConditions requestConditions = options.getRequestConditions();
    requestConditions = (requestConditions == null) ? new AppendBlobRequestConditions() : requestConditions;
    context = context == null ? Context.NONE : context;

    return this.azureBlobStorage.getAppendBlobs().sealWithResponseAsync(containerName, blobName, null, null,
        requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(),
        requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(),
        requestConditions.getIfNoneMatch(), requestConditions.getAppendPosition(),
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}
}
The places where the try/catch was removed call into other APIs that already handle it for them. The non-maximal overloads that still have a try/catch generally create option bags whose constructors may throw.
/**
 * Creates a 0-length append blob. Call appendBlock to append data to it. By default an
 * existing blob is not overwritten.
 *
 * @return A {@link Mono} containing the information of the created appended blob.
 */
public Mono<AppendBlobItem> create() {
    // Delegate to the boolean overload with overwriting disabled.
    final boolean overwrite = false;
    return create(overwrite);
}
}
/**
 * Creates a 0-length append blob. Call appendBlock to append data to it. By default this
 * method will not overwrite an existing blob.
 *
 * @return A {@link Mono} containing the information of the created appended blob.
 */
// Convenience overload: equivalent to create(false), i.e. fail if the blob already exists.
public Mono<AppendBlobItem> create() { return create(false); }
/**
 * Async client to an append blob. Instances are immutable and thread-safe; obtain one via
 * {@link SpecializedBlobClientBuilder}.
 */
class AppendBlobAsyncClient extends BlobAsyncClientBase {
    private static final ClientLogger LOGGER = new ClientLogger(AppendBlobAsyncClient.class);

    /**
     * Indicates the maximum number of bytes that can be sent in a call to appendBlock.
     */
    public static final int MAX_APPEND_BLOCK_BYTES = 4 * Constants.MB;

    /**
     * Indicates the maximum number of blocks allowed in an append blob.
     */
    public static final int MAX_BLOCKS = 50000;

    /**
     * Package-private constructor for use by {@link SpecializedBlobClientBuilder}.
     *
     * @param pipeline The pipeline used to send and receive service requests.
     * @param url The endpoint where to send service requests.
     * @param serviceVersion The version of the service to receive requests.
     * @param accountName The storage account name.
     * @param containerName The container name.
     * @param blobName The blob name.
     * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
     * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server,
     * pass {@code null} to allow the service to use its own encryption.
     * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest version.
     */
    AppendBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName,
        String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey,
        EncryptionScope encryptionScope, String versionId) {
        super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey,
            encryptionScope, versionId);
    }

    /**
     * Creates a new {@link AppendBlobAsyncClient} with the specified {@code encryptionScope}.
     *
     * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
     * @return a {@link AppendBlobAsyncClient} with the specified {@code encryptionScope}.
     */
    @Override
    public AppendBlobAsyncClient getEncryptionScopeAsyncClient(String encryptionScope) {
        EncryptionScope finalEncryptionScope = null;
        if (encryptionScope != null) {
            finalEncryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope);
        }
        return new AppendBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(),
            getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), finalEncryptionScope,
            getVersionId());
    }

    /**
     * Creates a new {@link AppendBlobAsyncClient} with the specified {@code customerProvidedKey}.
     *
     * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob, pass {@code null} to use no
     * customer provided key.
     * @return a {@link AppendBlobAsyncClient} with the specified {@code customerProvidedKey}.
     */
    @Override
    public AppendBlobAsyncClient getCustomerProvidedKeyAsyncClient(CustomerProvidedKey customerProvidedKey) {
        CpkInfo finalCustomerProvidedKey = null;
        if (customerProvidedKey != null) {
            finalCustomerProvidedKey = new CpkInfo()
                .setEncryptionKey(customerProvidedKey.getKey())
                .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
                .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
        }
        return new AppendBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(),
            getContainerName(), getBlobName(), getSnapshotId(), finalCustomerProvidedKey, encryptionScope,
            getVersionId());
    }

    /**
     * Creates a 0-length append blob. Call appendBlock to append data to an append blob. By default this method
     * will not overwrite an existing blob.
     *
     * @return A {@link Mono} containing the information of the created appended blob.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    // FIX: this overload was missing, leaving the preceding @ServiceMethod annotation dangling (the next
    // annotated declaration was create(boolean), which ended up with a duplicate @ServiceMethod — a compile
    // error). Restored as the simple delegator seen elsewhere in this file.
    public Mono<AppendBlobItem> create() {
        return create(false);
    }

    /**
     * Creates a 0-length append blob. Call appendBlock to append data to an append blob.
     *
     * @param overwrite Whether to overwrite, should data exist on the blob.
     * @return A {@link Mono} containing the information of the created appended blob.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<AppendBlobItem> create(boolean overwrite) {
        BlobRequestConditions blobRequestConditions = new BlobRequestConditions();
        if (!overwrite) {
            // If-None-Match: * makes the service reject the create when the blob already exists.
            blobRequestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
        }
        return createWithResponse(null, null, blobRequestConditions).flatMap(FluxUtil::toMono);
    }

    /**
     * Creates a 0-length append blob. Call appendBlock to append data to an append blob.
     * <p>
     * To avoid overwriting, pass "*" to {@link BlobRequestConditions#setIfNoneMatch(String)}.
     *
     * @param headers {@link BlobHttpHeaders}
     * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
     * metadata key or value, it must be removed or encoded.
     * @param requestConditions {@link BlobRequestConditions}
     * @return A {@link Mono} containing a {@link Response} whose value contains the created appended blob.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<AppendBlobItem>> createWithResponse(BlobHttpHeaders headers, Map<String, String> metadata,
        BlobRequestConditions requestConditions) {
        // Adapt the legacy parameter list onto the options-bag overload.
        return this.createWithResponse(new AppendBlobCreateOptions().setHeaders(headers).setMetadata(metadata)
            .setRequestConditions(requestConditions));
    }

    /**
     * Creates a 0-length append blob. Call appendBlock to append data to an append blob.
     * <p>
     * To avoid overwriting, pass "*" to {@link BlobRequestConditions#setIfNoneMatch(String)}.
     *
     * @param options {@link AppendBlobCreateOptions}
     * @return A {@link Mono} containing a {@link Response} whose value contains the created appended blob.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<AppendBlobItem>> createWithResponse(AppendBlobCreateOptions options) {
        try {
            return withContext(context -> createWithResponse(options, context));
        } catch (RuntimeException ex) {
            return monoError(LOGGER, ex);
        }
    }

    // Package-private worker: performs the Create Append Blob REST call with an explicit pipeline Context.
    Mono<Response<AppendBlobItem>> createWithResponse(AppendBlobCreateOptions options, Context context) {
        // Null-safe defaults so callers may pass null for options, conditions and context.
        options = (options == null) ? new AppendBlobCreateOptions() : options;
        BlobRequestConditions requestConditions = options.getRequestConditions();
        requestConditions = (requestConditions == null) ? new BlobRequestConditions() : requestConditions;
        context = context == null ? Context.NONE : context;
        BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null
            ? new BlobImmutabilityPolicy() : options.getImmutabilityPolicy();
        return this.azureBlobStorage.getAppendBlobs().createWithResponseAsync(containerName, blobName, 0, null,
            options.getMetadata(), requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(),
            requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(),
            requestConditions.getIfNoneMatch(), requestConditions.getTagsConditions(), null,
            tagsToString(options.getTags()), immutabilityPolicy.getExpiryTime(), immutabilityPolicy.getPolicyMode(),
            options.hasLegalHold(), options.getHeaders(), getCustomerProvidedKey(), encryptionScope,
            context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
            .map(rb -> {
                // Project the generated response headers onto the public AppendBlobItem model.
                AppendBlobsCreateHeaders hd = rb.getDeserializedHeaders();
                AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(),
                    hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(),
                    null, null, hd.getXMsVersionId());
                return new SimpleResponse<>(rb, item);
            });
    }

    /**
     * Commits a new block of data to the end of the existing append blob.
     * <p>
     * Note that the data passed must be replayable if retries are enabled (the default): the {@code Flux}
     * must produce the same data each time it is subscribed to.
     *
     * @param data The data to write to the blob; must be replayable if retries are enabled.
     * @param length The exact length of the data emitted by the {@code Flux}.
     * @return {@link Mono} containing the information of the append blob operation.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<AppendBlobItem> appendBlock(Flux<ByteBuffer> data, long length) {
        return appendBlockWithResponse(data, length, null, null).flatMap(FluxUtil::toMono);
    }

    /**
     * Commits a new block of data to the end of the existing append blob.
     *
     * @param data The data to write to the blob; must be replayable if retries are enabled.
     * @param length The exact length of the data emitted by the {@code Flux}.
     * @param contentMd5 Optional MD5 hash of the block content, used by the service to verify transport
     * integrity; not stored with the blob. A mismatch fails the operation.
     * @param appendBlobRequestConditions {@link AppendBlobRequestConditions}
     * @return A {@link Mono} containing a {@link Response} whose value contains the append blob operation result.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<AppendBlobItem>> appendBlockWithResponse(Flux<ByteBuffer> data, long length,
        byte[] contentMd5, AppendBlobRequestConditions appendBlobRequestConditions) {
        try {
            return withContext(context -> appendBlockWithResponse(data, length, contentMd5,
                appendBlobRequestConditions, context));
        } catch (RuntimeException ex) {
            return monoError(LOGGER, ex);
        }
    }

    // Package-private worker for Append Block with an explicit pipeline Context.
    Mono<Response<AppendBlobItem>> appendBlockWithResponse(Flux<ByteBuffer> data, long length, byte[] contentMd5,
        AppendBlobRequestConditions appendBlobRequestConditions, Context context) {
        appendBlobRequestConditions = appendBlobRequestConditions == null
            ? new AppendBlobRequestConditions() : appendBlobRequestConditions;
        context = context == null ? Context.NONE : context;

        return this.azureBlobStorage.getAppendBlobs().appendBlockWithResponseAsync(
            containerName, blobName, length, data, null, contentMd5, null,
            appendBlobRequestConditions.getLeaseId(), appendBlobRequestConditions.getMaxSize(),
            appendBlobRequestConditions.getAppendPosition(), appendBlobRequestConditions.getIfModifiedSince(),
            appendBlobRequestConditions.getIfUnmodifiedSince(), appendBlobRequestConditions.getIfMatch(),
            appendBlobRequestConditions.getIfNoneMatch(), appendBlobRequestConditions.getTagsConditions(), null,
            getCustomerProvidedKey(), encryptionScope,
            context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
            .map(rb -> {
                AppendBlobsAppendBlockHeaders hd = rb.getDeserializedHeaders();
                AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(),
                    hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(),
                    hd.getXMsBlobAppendOffset(), hd.getXMsBlobCommittedBlockCount());
                return new SimpleResponse<>(rb, item);
            });
    }

    /**
     * Commits a new block of data from another blob to the end of this append blob.
     *
     * @param sourceUrl The url to the source blob. A source in the same storage account can be authenticated
     * via Shared Key; a source in another account must be public or authenticated via SAS.
     * @param sourceRange The source {@link BlobRange} to copy.
     * @return {@link Mono} containing the information of the append blob operation.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<AppendBlobItem> appendBlockFromUrl(String sourceUrl, BlobRange sourceRange) {
        return appendBlockFromUrlWithResponse(sourceUrl, sourceRange, null, null, null).flatMap(FluxUtil::toMono);
    }

    /**
     * Commits a new block of data from another blob to the end of this append blob.
     *
     * @param sourceUrl The url to the source blob (public, SAS-authenticated, or same-account Shared Key).
     * @param sourceRange {@link BlobRange}
     * @param sourceContentMD5 Optional MD5 of the source block content; a mismatch fails the request.
     * @param destRequestConditions {@link AppendBlobRequestConditions}
     * @param sourceRequestConditions {@link BlobRequestConditions}
     * @return A {@link Mono} containing a {@link Response} whose value contains the append blob operation result.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(String sourceUrl, BlobRange sourceRange,
        byte[] sourceContentMD5, AppendBlobRequestConditions destRequestConditions,
        BlobRequestConditions sourceRequestConditions) {
        // Adapt the legacy parameter list onto the options-bag overload.
        return appendBlockFromUrlWithResponse(new AppendBlobAppendBlockFromUrlOptions(sourceUrl)
            .setSourceRange(sourceRange).setSourceContentMd5(sourceContentMD5)
            .setDestinationRequestConditions(destRequestConditions)
            .setSourceRequestConditions(sourceRequestConditions));
    }

    /**
     * Commits a new block of data from another blob to the end of this append blob.
     *
     * @param options Parameters for the operation.
     * @return A {@link Mono} containing a {@link Response} whose value contains the append blob operation result.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(
        AppendBlobAppendBlockFromUrlOptions options) {
        try {
            return withContext(context -> appendBlockFromUrlWithResponse(options, context));
        } catch (RuntimeException ex) {
            return monoError(LOGGER, ex);
        }
    }

    // Package-private worker for Append Block From URL with an explicit pipeline Context.
    Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(AppendBlobAppendBlockFromUrlOptions options,
        Context context) {
        // Null-safe defaults for range and both sets of access conditions.
        BlobRange sourceRange = (options.getSourceRange() == null) ? new BlobRange(0) : options.getSourceRange();
        AppendBlobRequestConditions destRequestConditions = (options.getDestinationRequestConditions() == null)
            ? new AppendBlobRequestConditions() : options.getDestinationRequestConditions();
        RequestConditions sourceRequestConditions = (options.getSourceRequestConditions() == null)
            ? new RequestConditions() : options.getSourceRequestConditions();

        try {
            // Eagerly validate the source URL so a malformed value fails fast with a clear message.
            new URL(options.getSourceUrl());
        } catch (MalformedURLException ex) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url."));
        }
        context = context == null ? Context.NONE : context;
        String sourceAuth = options.getSourceAuthorization() == null
            ? null : options.getSourceAuthorization().toString();

        return this.azureBlobStorage.getAppendBlobs().appendBlockFromUrlWithResponseAsync(containerName, blobName,
            options.getSourceUrl(), 0, sourceRange.toString(), options.getSourceContentMd5(), null, null, null,
            destRequestConditions.getLeaseId(), destRequestConditions.getMaxSize(),
            destRequestConditions.getAppendPosition(), destRequestConditions.getIfModifiedSince(),
            destRequestConditions.getIfUnmodifiedSince(), destRequestConditions.getIfMatch(),
            destRequestConditions.getIfNoneMatch(), destRequestConditions.getTagsConditions(),
            sourceRequestConditions.getIfModifiedSince(), sourceRequestConditions.getIfUnmodifiedSince(),
            sourceRequestConditions.getIfMatch(), sourceRequestConditions.getIfNoneMatch(), null, sourceAuth,
            getCustomerProvidedKey(), encryptionScope,
            context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
            .map(rb -> {
                AppendBlobsAppendBlockFromUrlHeaders hd = rb.getDeserializedHeaders();
                AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(),
                    hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(),
                    hd.getXMsBlobAppendOffset(), hd.getXMsBlobCommittedBlockCount());
                return new SimpleResponse<>(rb, item);
            });
    }

    /**
     * Seals an append blob, making it read only. Any subsequent appends will fail.
     *
     * @return A reactive response signalling completion.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Void> seal() {
        return sealWithResponse(new AppendBlobSealOptions()).flatMap(FluxUtil::toMono);
    }

    /**
     * Seals an append blob, making it read only. Any subsequent appends will fail.
     *
     * @param options {@link AppendBlobSealOptions}
     * @return A reactive response signalling completion.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> sealWithResponse(AppendBlobSealOptions options) {
        try {
            return withContext(context -> sealWithResponse(options, context));
        } catch (RuntimeException ex) {
            return monoError(LOGGER, ex);
        }
    }

    // Package-private worker for Seal Blob with an explicit pipeline Context.
    Mono<Response<Void>> sealWithResponse(AppendBlobSealOptions options, Context context) {
        options = (options == null) ? new AppendBlobSealOptions() : options;
        AppendBlobRequestConditions requestConditions = options.getRequestConditions();
        requestConditions = (requestConditions == null) ? new AppendBlobRequestConditions() : requestConditions;
        context = context == null ? Context.NONE : context;

        return this.azureBlobStorage.getAppendBlobs().sealWithResponseAsync(containerName, blobName, null, null,
            requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(),
            requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(),
            requestConditions.getIfNoneMatch(), requestConditions.getAppendPosition(),
            context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
            .map(response -> new SimpleResponse<>(response, null));
    }
}
class AppendBlobAsyncClient extends BlobAsyncClientBase { private static final ClientLogger LOGGER = new ClientLogger(AppendBlobAsyncClient.class); /** * Indicates the maximum number of bytes that can be sent in a call to appendBlock. */ public static final int MAX_APPEND_BLOCK_BYTES = 4 * Constants.MB; /** * Indicates the maximum number of blocks allowed in an append blob. */ public static final int MAX_BLOCKS = 50000; /** * Package-private constructor for use by {@link SpecializedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. */ AppendBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, EncryptionScope encryptionScope, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, versionId); } /** * Creates a new {@link AppendBlobAsyncClient} with the specified {@code encryptionScope}. * * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope. 
* @return a {@link AppendBlobAsyncClient} with the specified {@code encryptionScope}. */ @Override public AppendBlobAsyncClient getEncryptionScopeAsyncClient(String encryptionScope) { EncryptionScope finalEncryptionScope = null; if (encryptionScope != null) { finalEncryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return new AppendBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), finalEncryptionScope, getVersionId()); } /** * Creates a new {@link AppendBlobAsyncClient} with the specified {@code customerProvidedKey}. * * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob, * pass {@code null} to use no customer provided key. * @return a {@link AppendBlobAsyncClient} with the specified {@code customerProvidedKey}. */ @Override public AppendBlobAsyncClient getCustomerProvidedKeyAsyncClient(CustomerProvidedKey customerProvidedKey) { CpkInfo finalCustomerProvidedKey = null; if (customerProvidedKey != null) { finalCustomerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return new AppendBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), finalCustomerProvidedKey, encryptionScope, getVersionId()); } /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. By default this method will * not overwrite an existing blob. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.create --> * <pre> * client.create& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.create --> * * @return A {@link Mono} containing the information of the created appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.create * <pre> * boolean overwrite = false; & * client.create& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.create * * @param overwrite Whether to overwrite, should data exist on the blob. * * @return A {@link Mono} containing the information of the created appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AppendBlobItem> create(boolean overwrite) { BlobRequestConditions blobRequestConditions = new BlobRequestConditions(); if (!overwrite) { blobRequestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return createWithResponse(null, null, blobRequestConditions).flatMap(FluxUtil::toMono); } /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. 
* <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentType& * .setContentLanguage& * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.createWithResponse& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param requestConditions {@link BlobRequestConditions} * @return A {@link Mono} containing {@link Response} whose {@link Response * appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> createWithResponse(BlobHttpHeaders headers, Map<String, String> metadata, BlobRequestConditions requestConditions) { return this.createWithResponse(new AppendBlobCreateOptions().setHeaders(headers).setMetadata(metadata) .setRequestConditions(requestConditions)); } /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. 
* <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentType& * .setContentLanguage& * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.createWithResponse& * .setTags& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * * @param options {@link AppendBlobCreateOptions} * @return A {@link Mono} containing {@link Response} whose {@link Response * appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> createWithResponse(AppendBlobCreateOptions options) { try { return withContext(context -> createWithResponse(options, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<AppendBlobItem>> createWithResponse(AppendBlobCreateOptions options, Context context) { options = (options == null) ? new AppendBlobCreateOptions() : options; BlobRequestConditions requestConditions = options.getRequestConditions(); requestConditions = (requestConditions == null) ? new BlobRequestConditions() : requestConditions; context = context == null ? Context.NONE : context; BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null ? 
new BlobImmutabilityPolicy() : options.getImmutabilityPolicy(); return this.azureBlobStorage.getAppendBlobs().createWithResponseAsync(containerName, blobName, 0, null, options.getMetadata(), requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(), requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(), requestConditions.getIfNoneMatch(), requestConditions.getTagsConditions(), null, tagsToString(options.getTags()), immutabilityPolicy.getExpiryTime(), immutabilityPolicy.getPolicyMode(), options.hasLegalHold(), options.getHeaders(), getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(rb -> { AppendBlobsCreateHeaders hd = rb.getDeserializedHeaders(); AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), null, null, hd.getXMsVersionId()); return new SimpleResponse<>(rb, item); }); } /** * Commits a new block of data to the end of the existing append blob. * <p> * Note that the data passed must be replayable if retries are enabled (the default). In other words, the * {@code Flux} must produce the same data each time it is subscribed to. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlock * <pre> * client.appendBlock& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlock * * @param data The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled * (the default). In other words, the Flux must produce the same data each time it is subscribed to. * @param length The exact length of the data. It is important that this value match precisely the length of the * data emitted by the {@code Flux}. 
* @return {@link Mono} containing the information of the append blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AppendBlobItem> appendBlock(Flux<ByteBuffer> data, long length) { return appendBlockWithResponse(data, length, null, null).flatMap(FluxUtil::toMono); } /** * Commits a new block of data to the end of the existing append blob. * <p> * Note that the data passed must be replayable if retries are enabled (the default). In other words, the * {@code Flux} must produce the same data each time it is subscribed to. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockWithResponse * <pre> * byte[] md5 = MessageDigest.getInstance& * AppendBlobRequestConditions requestConditions = new AppendBlobRequestConditions& * .setAppendPosition& * .setMaxSize& * * client.appendBlockWithResponse& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockWithResponse * * @param data The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled * (the default). In other words, the Flux must produce the same data each time it is subscribed to. * @param length The exact length of the data. It is important that this value match precisely the length of the * data emitted by the {@code Flux}. * @param contentMd5 An MD5 hash of the block content. This hash is used to verify the integrity of the block during * transport. When this header is specified, the storage service compares the hash of the content that has arrived * with this header value. Note that this MD5 hash is not stored with the blob. If the two hashes do not match, the * operation will fail. * @param appendBlobRequestConditions {@link AppendBlobRequestConditions} * @return A {@link Mono} containing {@link Response} whose {@link Response * blob operation. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> appendBlockWithResponse(Flux<ByteBuffer> data, long length, byte[] contentMd5, AppendBlobRequestConditions appendBlobRequestConditions) { try { return withContext(context -> appendBlockWithResponse(data, length, contentMd5, appendBlobRequestConditions, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<AppendBlobItem>> appendBlockWithResponse(Flux<ByteBuffer> data, long length, byte[] contentMd5, AppendBlobRequestConditions appendBlobRequestConditions, Context context) { appendBlobRequestConditions = appendBlobRequestConditions == null ? new AppendBlobRequestConditions() : appendBlobRequestConditions; context = context == null ? Context.NONE : context; return this.azureBlobStorage.getAppendBlobs().appendBlockWithResponseAsync( containerName, blobName, length, data, null, contentMd5, null, appendBlobRequestConditions.getLeaseId(), appendBlobRequestConditions.getMaxSize(), appendBlobRequestConditions.getAppendPosition(), appendBlobRequestConditions.getIfModifiedSince(), appendBlobRequestConditions.getIfUnmodifiedSince(), appendBlobRequestConditions.getIfMatch(), appendBlobRequestConditions.getIfNoneMatch(), appendBlobRequestConditions.getTagsConditions(), null, getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(rb -> { AppendBlobsAppendBlockHeaders hd = rb.getDeserializedHeaders(); AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), hd.getXMsBlobAppendOffset(), hd.getXMsBlobCommittedBlockCount()); return new SimpleResponse<>(rb, item); }); } /** * Commits a new block of data from another blob to the end of this append blob. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrl * <pre> * client.appendBlockFromUrl& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrl * * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage * account can be authenticated via Shared Key. However, if the source is a blob in another account, the source blob * must either be public or must be authenticated via a shared access signature. If the source blob is public, no * authentication is required to perform the operation. * @param sourceRange The source {@link BlobRange} to copy. * @return {@link Mono} containing the information of the append blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AppendBlobItem> appendBlockFromUrl(String sourceUrl, BlobRange sourceRange) { return appendBlockFromUrlWithResponse(sourceUrl, sourceRange, null, null, null).flatMap(FluxUtil::toMono); } /** * Commits a new block of data from another blob to the end of this append blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * <pre> * AppendBlobRequestConditions appendBlobRequestConditions = new AppendBlobRequestConditions& * .setAppendPosition& * .setMaxSize& * * BlobRequestConditions modifiedRequestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.appendBlockFromUrlWithResponse& * appendBlobRequestConditions, modifiedRequestConditions& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage * account can be authenticated via Shared Key. 
However, if the source is a blob in another account, the source blob * must either be public or must be authenticated via a shared access signature. If the source blob is public, no * authentication is required to perform the operation. * @param sourceRange {@link BlobRange} * @param sourceContentMD5 An MD5 hash of the block content from the source blob. If specified, the service will * calculate the MD5 of the received data and fail the request if it does not match the provided MD5. * @param destRequestConditions {@link AppendBlobRequestConditions} * @param sourceRequestConditions {@link BlobRequestConditions} * @return A {@link Mono} containing {@link Response} whose {@link Response * blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(String sourceUrl, BlobRange sourceRange, byte[] sourceContentMD5, AppendBlobRequestConditions destRequestConditions, BlobRequestConditions sourceRequestConditions) { return appendBlockFromUrlWithResponse(new AppendBlobAppendBlockFromUrlOptions(sourceUrl) .setSourceRange(sourceRange).setSourceContentMd5(sourceContentMD5) .setDestinationRequestConditions(destRequestConditions) .setSourceRequestConditions(sourceRequestConditions)); } /** * Commits a new block of data from another blob to the end of this append blob. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * <pre> * AppendBlobRequestConditions appendBlobRequestConditions = new AppendBlobRequestConditions& * .setAppendPosition& * .setMaxSize& * * BlobRequestConditions modifiedRequestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.appendBlockFromUrlWithResponse& * .setSourceRange& * .setDestinationRequestConditions& * .setSourceRequestConditions& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * * @param options Parameters for the operation. * @return A {@link Mono} containing {@link Response} whose {@link Response * blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(AppendBlobAppendBlockFromUrlOptions options) { try { return withContext(context -> appendBlockFromUrlWithResponse(options, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(AppendBlobAppendBlockFromUrlOptions options, Context context) { BlobRange sourceRange = (options.getSourceRange() == null) ? new BlobRange(0) : options.getSourceRange(); AppendBlobRequestConditions destRequestConditions = (options.getDestinationRequestConditions() == null) ? new AppendBlobRequestConditions() : options.getDestinationRequestConditions(); RequestConditions sourceRequestConditions = (options.getSourceRequestConditions() == null) ? new RequestConditions() : options.getSourceRequestConditions(); try { new URL(options.getSourceUrl()); } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url.")); } context = context == null ? Context.NONE : context; String sourceAuth = options.getSourceAuthorization() == null ? 
null : options.getSourceAuthorization().toString(); return this.azureBlobStorage.getAppendBlobs().appendBlockFromUrlWithResponseAsync(containerName, blobName, options.getSourceUrl(), 0, sourceRange.toString(), options.getSourceContentMd5(), null, null, null, destRequestConditions.getLeaseId(), destRequestConditions.getMaxSize(), destRequestConditions.getAppendPosition(), destRequestConditions.getIfModifiedSince(), destRequestConditions.getIfUnmodifiedSince(), destRequestConditions.getIfMatch(), destRequestConditions.getIfNoneMatch(), destRequestConditions.getTagsConditions(), sourceRequestConditions.getIfModifiedSince(), sourceRequestConditions.getIfUnmodifiedSince(), sourceRequestConditions.getIfMatch(), sourceRequestConditions.getIfNoneMatch(), null, sourceAuth, getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(rb -> { AppendBlobsAppendBlockFromUrlHeaders hd = rb.getDeserializedHeaders(); AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), hd.getXMsBlobAppendOffset(), hd.getXMsBlobCommittedBlockCount()); return new SimpleResponse<>(rb, item); }); } /** * Seals an append blob, making it read only. Any subsequent appends will fail. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.seal --> * <pre> * client.seal& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.seal --> * * @return A reactive response signalling completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> seal() { return sealWithResponse(new AppendBlobSealOptions()).flatMap(FluxUtil::toMono); } /** * Seals an append blob, making it read only. Any subsequent appends will fail. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.sealWithResponse * <pre> * AppendBlobRequestConditions requestConditions = new AppendBlobRequestConditions& * .setIfUnmodifiedSince& * * client.sealWithResponse& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.sealWithResponse * * @param options {@link AppendBlobSealOptions} * @return A reactive response signalling completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sealWithResponse(AppendBlobSealOptions options) { try { return withContext(context -> sealWithResponse(options, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<Void>> sealWithResponse(AppendBlobSealOptions options, Context context) { options = (options == null) ? new AppendBlobSealOptions() : options; AppendBlobRequestConditions requestConditions = options.getRequestConditions(); requestConditions = (requestConditions == null) ? new AppendBlobRequestConditions() : requestConditions; context = context == null ? Context.NONE : context; return this.azureBlobStorage.getAppendBlobs().sealWithResponseAsync(containerName, blobName, null, null, requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(), requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(), requestConditions.getIfNoneMatch(), requestConditions.getAppendPosition(), context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(response -> new SimpleResponse<>(response, null)); } }
It seems a bit surprising that this method returns only the first credential's content. Users can still retrieve all credentials by calling `userKubeConfigs(format)`, correct?
public byte[] userKubeConfigContent(Format format) {
    // A null format falls back to the default (format-less) kubeconfig content.
    if (format == null) {
        return userKubeConfigContent();
    }
    // Only the first credential's payload is surfaced here; callers that need
    // every credential should invoke userKubeConfigs(format) directly.
    List<CredentialResult> configs = userKubeConfigs(format);
    return configs.isEmpty() ? new byte[0] : configs.get(0).value();
}
}
/**
 * Returns the raw kubeconfig content for the given credential {@code Format}.
 *
 * <p>Only the first credential returned by {@code userKubeConfigs(format)} is
 * surfaced; call {@code userKubeConfigs(format)} to obtain all of them.</p>
 *
 * @param format the kubeconfig format; {@code null} falls back to the
 *     format-less overload.
 * @return the first user credential's kubeconfig bytes, or an empty array
 *     when no credential is available.
 */
public byte[] userKubeConfigContent(Format format) {
    if (format == null) {
        return userKubeConfigContent();
    }
    // Loop returns on the first element; the list is usually size 0 or 1.
    for (CredentialResult config : userKubeConfigs(format)) {
        return config.value();
    }
    return new byte[0];
}
class KubernetesClusterImpl extends GroupableResourceImpl< KubernetesCluster, ManagedClusterInner, KubernetesClusterImpl, ContainerServiceManager> implements KubernetesCluster, KubernetesCluster.Definition, KubernetesCluster.Update { private final ClientLogger logger = new ClientLogger(getClass()); private List<CredentialResult> adminKubeConfigs; private List<CredentialResult> userKubeConfigs; private final Map<Format, List<CredentialResult>> formatUserKubeConfigsMap = new ConcurrentHashMap<>(); protected KubernetesClusterImpl(String name, ManagedClusterInner innerObject, ContainerServiceManager manager) { super(name, innerObject, manager); if (this.innerModel().agentPoolProfiles() == null) { this.innerModel().withAgentPoolProfiles(new ArrayList<>()); } this.adminKubeConfigs = null; this.userKubeConfigs = null; } @Override public String provisioningState() { return this.innerModel().provisioningState(); } @Override public String dnsPrefix() { return this.innerModel().dnsPrefix(); } @Override public String fqdn() { return this.innerModel().fqdn(); } @Override public String version() { return this.innerModel().kubernetesVersion(); } @Override public List<CredentialResult> adminKubeConfigs() { if (this.adminKubeConfigs == null || this.adminKubeConfigs.size() == 0) { this.adminKubeConfigs = this.manager().kubernetesClusters().listAdminKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.adminKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs() { if (this.userKubeConfigs == null || this.userKubeConfigs.size() == 0) { this.userKubeConfigs = this.manager().kubernetesClusters().listUserKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.userKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs(Format format) { if (format == null) { return userKubeConfigs(); } return Collections.unmodifiableList( 
this.formatUserKubeConfigsMap.computeIfAbsent( format, key -> KubernetesClusterImpl.this .manager() .kubernetesClusters() .listUserKubeConfigContent( KubernetesClusterImpl.this.resourceGroupName(), KubernetesClusterImpl.this.name(), format )) ); } @Override public byte[] adminKubeConfigContent() { for (CredentialResult config : adminKubeConfigs()) { return config.value(); } return new byte[0]; } @Override public byte[] userKubeConfigContent() { for (CredentialResult config : userKubeConfigs()) { return config.value(); } return new byte[0]; } @Override @Override public String servicePrincipalClientId() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().clientId(); } else { return null; } } @Override public String servicePrincipalSecret() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().secret(); } else { return null; } } @Override public String linuxRootUsername() { if (this.innerModel().linuxProfile() != null) { return this.innerModel().linuxProfile().adminUsername(); } else { return null; } } @Override public String sshKey() { if (this.innerModel().linuxProfile() == null || this.innerModel().linuxProfile().ssh() == null || this.innerModel().linuxProfile().ssh().publicKeys() == null || this.innerModel().linuxProfile().ssh().publicKeys().size() == 0) { return null; } else { return this.innerModel().linuxProfile().ssh().publicKeys().get(0).keyData(); } } @Override public Map<String, KubernetesClusterAgentPool> agentPools() { Map<String, KubernetesClusterAgentPool> agentPoolMap = new HashMap<>(); if (this.innerModel().agentPoolProfiles() != null && this.innerModel().agentPoolProfiles().size() > 0) { for (ManagedClusterAgentPoolProfile agentPoolProfile : this.innerModel().agentPoolProfiles()) { agentPoolMap.put(agentPoolProfile.name(), new KubernetesClusterAgentPoolImpl(agentPoolProfile, this)); } } return 
Collections.unmodifiableMap(agentPoolMap); } @Override public ContainerServiceNetworkProfile networkProfile() { return this.innerModel().networkProfile(); } @Override public Map<String, ManagedClusterAddonProfile> addonProfiles() { return Collections.unmodifiableMap(this.innerModel().addonProfiles()); } @Override public String nodeResourceGroup() { return this.innerModel().nodeResourceGroup(); } @Override public boolean enableRBAC() { return this.innerModel().enableRbac(); } @Override public PowerState powerState() { return this.innerModel().powerState(); } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { String objectId = null; if (this.innerModel().identityProfile() != null) { UserAssignedIdentity identity = this.innerModel().identityProfile().get("kubeletidentity"); if (identity != null) { objectId = identity.objectId(); } } return objectId; } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return manager().kubernetesClusters().startAsync(this.resourceGroupName(), this.name()); } @Override public void stop() { this.stopAsync().block(); } @Override public Mono<Void> stopAsync() { return manager().kubernetesClusters().stopAsync(this.resourceGroupName(), this.name()); } private Mono<List<CredentialResult>> listAdminConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listAdminKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.adminKubeConfigs = kubeConfigs; return self.adminKubeConfigs; }); } private Mono<List<CredentialResult>> listUserConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listUserKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.userKubeConfigs = kubeConfigs; return self.userKubeConfigs; }); } @Override protected Mono<ManagedClusterInner> getInnerAsync() { final KubernetesClusterImpl self = this; final 
Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .getByResourceGroupAsync(this.resourceGroupName(), this.name()) .flatMap( managedClusterInner -> Flux.merge(adminConfig, userConfig).last().map(bytes -> managedClusterInner)); } @Override public Mono<KubernetesCluster> createResourceAsync() { final KubernetesClusterImpl self = this; if (!this.isInCreateMode()) { this.innerModel().withServicePrincipalProfile(null); } final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .createOrUpdateAsync(self.resourceGroupName(), self.name(), self.innerModel()) .flatMap( inner -> Flux .merge(adminConfig, userConfig) .last() .map( bytes -> { self.setInner(inner); return self; })); } @Override public KubernetesClusterImpl withVersion(String kubernetesVersion) { this.innerModel().withKubernetesVersion(kubernetesVersion); return this; } @Override public KubernetesClusterImpl withDefaultVersion() { this.innerModel().withKubernetesVersion(""); return this; } @Override public KubernetesClusterImpl withRootUsername(String rootUserName) { if (this.innerModel().linuxProfile() == null) { this.innerModel().withLinuxProfile(new ContainerServiceLinuxProfile()); } this.innerModel().linuxProfile().withAdminUsername(rootUserName); return this; } @Override public KubernetesClusterImpl withSshKey(String sshKeyData) { this .innerModel() .linuxProfile() .withSsh( new ContainerServiceSshConfiguration().withPublicKeys(new ArrayList<ContainerServiceSshPublicKey>())); this.innerModel().linuxProfile().ssh().publicKeys().add( new ContainerServiceSshPublicKey().withKeyData(sshKeyData)); return this; } @Override public KubernetesClusterImpl withServicePrincipalClientId(String clientId) { 
this.innerModel().withServicePrincipalProfile( new ManagedClusterServicePrincipalProfile().withClientId(clientId)); return this; } @Override public KubernetesClusterImpl withSystemAssignedManagedServiceIdentity() { this.innerModel().withIdentity(new ManagedClusterIdentity().withType(ResourceIdentityType.SYSTEM_ASSIGNED)); return this; } @Override public KubernetesClusterImpl withServicePrincipalSecret(String secret) { this.innerModel().servicePrincipalProfile().withSecret(secret); return this; } @Override public KubernetesClusterImpl withDnsPrefix(String dnsPrefix) { this.innerModel().withDnsPrefix(dnsPrefix); return this; } @Override public KubernetesClusterAgentPoolImpl defineAgentPool(String name) { ManagedClusterAgentPoolProfile innerPoolProfile = new ManagedClusterAgentPoolProfile(); innerPoolProfile.withName(name); return new KubernetesClusterAgentPoolImpl(innerPoolProfile, this); } @Override public KubernetesClusterAgentPoolImpl updateAgentPool(String name) { for (ManagedClusterAgentPoolProfile agentPoolProfile : innerModel().agentPoolProfiles()) { if (agentPoolProfile.name().equals(name)) { return new KubernetesClusterAgentPoolImpl(agentPoolProfile, this); } } throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Cannot get agent pool named %s", name))); } @Override public Update withoutAgentPool(String name) { if (innerModel().agentPoolProfiles() != null) { innerModel().withAgentPoolProfiles( innerModel().agentPoolProfiles().stream() .filter(p -> !name.equals(p.name())) .collect(Collectors.toList())); this.addDependency(context -> manager().serviceClient().getAgentPools().deleteAsync(resourceGroupName(), name(), name) .then(context.voidMono())); } return this; } @Override public KubernetesCluster.DefinitionStages.NetworkProfileDefinitionStages.Blank< KubernetesCluster.DefinitionStages.WithCreate> defineNetworkProfile() { return new KubernetesClusterNetworkProfileImpl(this); } @Override public KubernetesClusterImpl 
withAddOnProfiles(Map<String, ManagedClusterAddonProfile> addOnProfileMap) { this.innerModel().withAddonProfiles(addOnProfileMap); return this; } @Override public KubernetesClusterImpl withNetworkProfile(ContainerServiceNetworkProfile networkProfile) { this.innerModel().withNetworkProfile(networkProfile); return this; } @Override public KubernetesClusterImpl withRBACEnabled() { this.innerModel().withEnableRbac(true); return this; } @Override public KubernetesClusterImpl withRBACDisabled() { this.innerModel().withEnableRbac(false); return this; } public KubernetesClusterImpl addNewAgentPool(KubernetesClusterAgentPoolImpl agentPool) { if (!isInCreateMode()) { this.addDependency(context -> manager().serviceClient().getAgentPools().createOrUpdateAsync( resourceGroupName(), name(), agentPool.name(), agentPool.getAgentPoolInner()) .then(context.voidMono())); } innerModel().agentPoolProfiles().add(agentPool.innerModel()); return this; } @Override public KubernetesClusterImpl withAutoScalerProfile(ManagedClusterPropertiesAutoScalerProfile autoScalerProfile) { this.innerModel().withAutoScalerProfile(autoScalerProfile); return this; } @Override public KubernetesClusterImpl enablePrivateCluster() { if (innerModel().apiServerAccessProfile() == null) { innerModel().withApiServerAccessProfile(new ManagedClusterApiServerAccessProfile()); } innerModel().apiServerAccessProfile().withEnablePrivateCluster(true); return this; } @Override public PagedIterable<PrivateLinkResource> listPrivateLinkResources() { return new PagedIterable<>(listPrivateLinkResourcesAsync()); } @Override public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() { Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateLinkResourceImpl::new) .collect(Collectors.toList()))); return 
PagedConverter.convertListToPagedFlux(retList); } @Override public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() { return new PagedIterable<>(listPrivateEndpointConnectionsAsync()); } @Override public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() { Mono<Response<List<PrivateEndpointConnection>>> retList = this.manager().serviceClient() .getPrivateEndpointConnections() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateEndpointConnectionImpl::new) .collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } private static final class PrivateLinkResourceImpl implements PrivateLinkResource { private final PrivateLinkResourceInner innerModel; private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) { this.innerModel = innerModel; } @Override public String groupId() { return innerModel.groupId(); } @Override public List<String> requiredMemberNames() { return Collections.unmodifiableList(innerModel.requiredMembers()); } @Override public List<String> requiredDnsZoneNames() { return Collections.emptyList(); } } private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection { private final PrivateEndpointConnectionInner innerModel; private final PrivateEndpoint privateEndpoint; private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState; private final PrivateEndpointConnectionProvisioningState provisioningState; private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) { this.innerModel = innerModel; this.privateEndpoint = innerModel.privateEndpoint() == null ? null : new PrivateEndpoint(innerModel.privateEndpoint().id()); this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null ? 
null : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState( innerModel.privateLinkServiceConnectionState().status() == null ? null : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus .fromString(innerModel.privateLinkServiceConnectionState().status().toString()), innerModel.privateLinkServiceConnectionState().description(), ""); this.provisioningState = innerModel.provisioningState() == null ? null : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString()); } @Override public String id() { return innerModel.id(); } @Override public String name() { return innerModel.name(); } @Override public String type() { return innerModel.type(); } @Override public PrivateEndpoint privateEndpoint() { return privateEndpoint; } @Override public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState() { return privateLinkServiceConnectionState; } @Override public PrivateEndpointConnectionProvisioningState provisioningState() { return provisioningState; } } }
class KubernetesClusterImpl extends GroupableResourceImpl< KubernetesCluster, ManagedClusterInner, KubernetesClusterImpl, ContainerServiceManager> implements KubernetesCluster, KubernetesCluster.Definition, KubernetesCluster.Update { private final ClientLogger logger = new ClientLogger(getClass()); private List<CredentialResult> adminKubeConfigs; private List<CredentialResult> userKubeConfigs; private final Map<Format, List<CredentialResult>> formatUserKubeConfigsMap = new ConcurrentHashMap<>(); protected KubernetesClusterImpl(String name, ManagedClusterInner innerObject, ContainerServiceManager manager) { super(name, innerObject, manager); if (this.innerModel().agentPoolProfiles() == null) { this.innerModel().withAgentPoolProfiles(new ArrayList<>()); } this.adminKubeConfigs = null; this.userKubeConfigs = null; } @Override public String provisioningState() { return this.innerModel().provisioningState(); } @Override public String dnsPrefix() { return this.innerModel().dnsPrefix(); } @Override public String fqdn() { return this.innerModel().fqdn(); } @Override public String version() { return this.innerModel().kubernetesVersion(); } @Override public List<CredentialResult> adminKubeConfigs() { if (this.adminKubeConfigs == null || this.adminKubeConfigs.size() == 0) { this.adminKubeConfigs = this.manager().kubernetesClusters().listAdminKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.adminKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs() { if (this.userKubeConfigs == null || this.userKubeConfigs.size() == 0) { this.userKubeConfigs = this.manager().kubernetesClusters().listUserKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.userKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs(Format format) { if (format == null) { return userKubeConfigs(); } return Collections.unmodifiableList( 
this.formatUserKubeConfigsMap.computeIfAbsent(
                format,
                key -> KubernetesClusterImpl.this
                    .manager()
                    .kubernetesClusters()
                    .listUserKubeConfigContent(
                        KubernetesClusterImpl.this.resourceGroupName(),
                        KubernetesClusterImpl.this.name(),
                        format
                    ))
        );
    }

    @Override
    public byte[] adminKubeConfigContent() {
        // First admin credential's raw kubeconfig bytes; empty array when no credentials exist.
        for (CredentialResult config : adminKubeConfigs()) {
            return config.value();
        }
        return new byte[0];
    }

    @Override
    public byte[] userKubeConfigContent() {
        // First user credential's raw kubeconfig bytes; empty array when no credentials exist.
        for (CredentialResult config : userKubeConfigs()) {
            return config.value();
        }
        return new byte[0];
    }

    // FIX: the original text read "@Override @Override" at this point — a duplicated,
    // non-repeatable annotation, which is a compile error. The dangling first annotation
    // belonged to the format-aware overload, restored here (it backs the cached
    // formatUserKubeConfigsMap lookup above).
    @Override
    public byte[] userKubeConfigContent(Format format) {
        if (format == null) {
            return userKubeConfigContent();
        }
        for (CredentialResult config : userKubeConfigs(format)) {
            return config.value();
        }
        return new byte[0];
    }

    @Override
    public String servicePrincipalClientId() {
        if (this.innerModel().servicePrincipalProfile() != null) {
            return this.innerModel().servicePrincipalProfile().clientId();
        } else {
            return null;
        }
    }

    @Override
    public String servicePrincipalSecret() {
        if (this.innerModel().servicePrincipalProfile() != null) {
            return this.innerModel().servicePrincipalProfile().secret();
        } else {
            return null;
        }
    }

    @Override
    public String linuxRootUsername() {
        if (this.innerModel().linuxProfile() != null) {
            return this.innerModel().linuxProfile().adminUsername();
        } else {
            return null;
        }
    }

    @Override
    public String sshKey() {
        // Only the first configured public key is surfaced by this convenience getter.
        if (this.innerModel().linuxProfile() == null
            || this.innerModel().linuxProfile().ssh() == null
            || this.innerModel().linuxProfile().ssh().publicKeys() == null
            || this.innerModel().linuxProfile().ssh().publicKeys().size() == 0) {
            return null;
        } else {
            return this.innerModel().linuxProfile().ssh().publicKeys().get(0).keyData();
        }
    }

    @Override
    public Map<String, KubernetesClusterAgentPool> agentPools() {
        Map<String, KubernetesClusterAgentPool> agentPoolMap = new HashMap<>();
        if (this.innerModel().agentPoolProfiles() != null && this.innerModel().agentPoolProfiles().size() > 0) {
            for (ManagedClusterAgentPoolProfile agentPoolProfile : this.innerModel().agentPoolProfiles()) {
                agentPoolMap.put(agentPoolProfile.name(), new KubernetesClusterAgentPoolImpl(agentPoolProfile, this));
            }
        }
        return 
Collections.unmodifiableMap(agentPoolMap); } @Override public ContainerServiceNetworkProfile networkProfile() { return this.innerModel().networkProfile(); } @Override public Map<String, ManagedClusterAddonProfile> addonProfiles() { return Collections.unmodifiableMap(this.innerModel().addonProfiles()); } @Override public String nodeResourceGroup() { return this.innerModel().nodeResourceGroup(); } @Override public boolean enableRBAC() { return this.innerModel().enableRbac(); } @Override public PowerState powerState() { return this.innerModel().powerState(); } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { String objectId = null; if (this.innerModel().identityProfile() != null) { UserAssignedIdentity identity = this.innerModel().identityProfile().get("kubeletidentity"); if (identity != null) { objectId = identity.objectId(); } } return objectId; } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return manager().kubernetesClusters().startAsync(this.resourceGroupName(), this.name()); } @Override public void stop() { this.stopAsync().block(); } @Override public Mono<Void> stopAsync() { return manager().kubernetesClusters().stopAsync(this.resourceGroupName(), this.name()); } private Mono<List<CredentialResult>> listAdminConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listAdminKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.adminKubeConfigs = kubeConfigs; return self.adminKubeConfigs; }); } private Mono<List<CredentialResult>> listUserConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listUserKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.userKubeConfigs = kubeConfigs; return self.userKubeConfigs; }); } @Override protected Mono<ManagedClusterInner> getInnerAsync() { final KubernetesClusterImpl self = this; final 
Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .getByResourceGroupAsync(this.resourceGroupName(), this.name()) .flatMap( managedClusterInner -> Flux .merge(adminConfig, userConfig) .last() .map( bytes -> { formatUserKubeConfigsMap.clear(); return managedClusterInner; })); } @Override public Mono<KubernetesCluster> createResourceAsync() { final KubernetesClusterImpl self = this; if (!this.isInCreateMode()) { this.innerModel().withServicePrincipalProfile(null); } final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .createOrUpdateAsync(self.resourceGroupName(), self.name(), self.innerModel()) .flatMap( inner -> Flux .merge(adminConfig, userConfig) .last() .map( bytes -> { self.setInner(inner); return self; })); } @Override public KubernetesClusterImpl withVersion(String kubernetesVersion) { this.innerModel().withKubernetesVersion(kubernetesVersion); return this; } @Override public KubernetesClusterImpl withDefaultVersion() { this.innerModel().withKubernetesVersion(""); return this; } @Override public KubernetesClusterImpl withRootUsername(String rootUserName) { if (this.innerModel().linuxProfile() == null) { this.innerModel().withLinuxProfile(new ContainerServiceLinuxProfile()); } this.innerModel().linuxProfile().withAdminUsername(rootUserName); return this; } @Override public KubernetesClusterImpl withSshKey(String sshKeyData) { this .innerModel() .linuxProfile() .withSsh( new ContainerServiceSshConfiguration().withPublicKeys(new ArrayList<ContainerServiceSshPublicKey>())); this.innerModel().linuxProfile().ssh().publicKeys().add( new ContainerServiceSshPublicKey().withKeyData(sshKeyData)); return this; } @Override public KubernetesClusterImpl 
withServicePrincipalClientId(String clientId) { this.innerModel().withServicePrincipalProfile( new ManagedClusterServicePrincipalProfile().withClientId(clientId)); return this; } @Override public KubernetesClusterImpl withSystemAssignedManagedServiceIdentity() { this.innerModel().withIdentity(new ManagedClusterIdentity().withType(ResourceIdentityType.SYSTEM_ASSIGNED)); return this; } @Override public KubernetesClusterImpl withServicePrincipalSecret(String secret) { this.innerModel().servicePrincipalProfile().withSecret(secret); return this; } @Override public KubernetesClusterImpl withDnsPrefix(String dnsPrefix) { this.innerModel().withDnsPrefix(dnsPrefix); return this; } @Override public KubernetesClusterAgentPoolImpl defineAgentPool(String name) { ManagedClusterAgentPoolProfile innerPoolProfile = new ManagedClusterAgentPoolProfile(); innerPoolProfile.withName(name); return new KubernetesClusterAgentPoolImpl(innerPoolProfile, this); } @Override public KubernetesClusterAgentPoolImpl updateAgentPool(String name) { for (ManagedClusterAgentPoolProfile agentPoolProfile : innerModel().agentPoolProfiles()) { if (agentPoolProfile.name().equals(name)) { return new KubernetesClusterAgentPoolImpl(agentPoolProfile, this); } } throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Cannot get agent pool named %s", name))); } @Override public Update withoutAgentPool(String name) { if (innerModel().agentPoolProfiles() != null) { innerModel().withAgentPoolProfiles( innerModel().agentPoolProfiles().stream() .filter(p -> !name.equals(p.name())) .collect(Collectors.toList())); this.addDependency(context -> manager().serviceClient().getAgentPools().deleteAsync(resourceGroupName(), name(), name) .then(context.voidMono())); } return this; } @Override public KubernetesCluster.DefinitionStages.NetworkProfileDefinitionStages.Blank< KubernetesCluster.DefinitionStages.WithCreate> defineNetworkProfile() { return new KubernetesClusterNetworkProfileImpl(this); } @Override 
public KubernetesClusterImpl withAddOnProfiles(Map<String, ManagedClusterAddonProfile> addOnProfileMap) { this.innerModel().withAddonProfiles(addOnProfileMap); return this; } @Override public KubernetesClusterImpl withNetworkProfile(ContainerServiceNetworkProfile networkProfile) { this.innerModel().withNetworkProfile(networkProfile); return this; } @Override public KubernetesClusterImpl withRBACEnabled() { this.innerModel().withEnableRbac(true); return this; } @Override public KubernetesClusterImpl withRBACDisabled() { this.innerModel().withEnableRbac(false); return this; } public KubernetesClusterImpl addNewAgentPool(KubernetesClusterAgentPoolImpl agentPool) { if (!isInCreateMode()) { this.addDependency(context -> manager().serviceClient().getAgentPools().createOrUpdateAsync( resourceGroupName(), name(), agentPool.name(), agentPool.getAgentPoolInner()) .then(context.voidMono())); } innerModel().agentPoolProfiles().add(agentPool.innerModel()); return this; } @Override public KubernetesClusterImpl withAutoScalerProfile(ManagedClusterPropertiesAutoScalerProfile autoScalerProfile) { this.innerModel().withAutoScalerProfile(autoScalerProfile); return this; } @Override public KubernetesClusterImpl enablePrivateCluster() { if (innerModel().apiServerAccessProfile() == null) { innerModel().withApiServerAccessProfile(new ManagedClusterApiServerAccessProfile()); } innerModel().apiServerAccessProfile().withEnablePrivateCluster(true); return this; } @Override public PagedIterable<PrivateLinkResource> listPrivateLinkResources() { return new PagedIterable<>(listPrivateLinkResourcesAsync()); } @Override public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() { Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateLinkResourceImpl::new) 
.collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } @Override public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() { return new PagedIterable<>(listPrivateEndpointConnectionsAsync()); } @Override public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() { Mono<Response<List<PrivateEndpointConnection>>> retList = this.manager().serviceClient() .getPrivateEndpointConnections() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateEndpointConnectionImpl::new) .collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } private static final class PrivateLinkResourceImpl implements PrivateLinkResource { private final PrivateLinkResourceInner innerModel; private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) { this.innerModel = innerModel; } @Override public String groupId() { return innerModel.groupId(); } @Override public List<String> requiredMemberNames() { return Collections.unmodifiableList(innerModel.requiredMembers()); } @Override public List<String> requiredDnsZoneNames() { return Collections.emptyList(); } } private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection { private final PrivateEndpointConnectionInner innerModel; private final PrivateEndpoint privateEndpoint; private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState; private final PrivateEndpointConnectionProvisioningState provisioningState; private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) { this.innerModel = innerModel; this.privateEndpoint = innerModel.privateEndpoint() == null ? 
null : new PrivateEndpoint(innerModel.privateEndpoint().id()); this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null ? null : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState( innerModel.privateLinkServiceConnectionState().status() == null ? null : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus .fromString(innerModel.privateLinkServiceConnectionState().status().toString()), innerModel.privateLinkServiceConnectionState().description(), ""); this.provisioningState = innerModel.provisioningState() == null ? null : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString()); } @Override public String id() { return innerModel.id(); } @Override public String name() { return innerModel.name(); } @Override public String type() { return innerModel.type(); } @Override public PrivateEndpoint privateEndpoint() { return privateEndpoint; } @Override public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState() { return privateLinkServiceConnectionState; } @Override public PrivateEndpointConnectionProvisioningState provisioningState() { return provisioningState; } } }
Can't find any docs on this format. The description says it supports listing the kubeconfig in this format for AAD-enabled clusters. We currently have no convenience-layer support for the AAD profile, so this applies to clusters created via the `portal` or the `CLI`.
/**
 * Gets the raw kubeconfig content for the given credential format.
 * <p>
 * Delegates to the format-less overload when {@code format} is null; otherwise returns the
 * value of the first credential listed for that format, or an empty array when none exist.
 *
 * @param format the credential format, may be null
 * @return the kubeconfig bytes, never null
 */
public byte[] userKubeConfigContent(Format format) {
    if (format == null) {
        return userKubeConfigContent();
    }
    List<CredentialResult> configs = userKubeConfigs(format);
    return configs.isEmpty() ? new byte[0] : configs.get(0).value();
}
}
// Gets the raw kubeconfig bytes for the given credential format.
// Falls back to the format-less overload when format is null; otherwise returns the first
// credential's value for that format, or an empty array when the list is empty.
public byte[] userKubeConfigContent(Format format) {
    if (format == null) {
        return userKubeConfigContent();
    }
    // returns on the first element, so the loop runs at most once
    for (CredentialResult config : userKubeConfigs(format)) {
        return config.value();
    }
    return new byte[0];
}
class KubernetesClusterImpl extends GroupableResourceImpl< KubernetesCluster, ManagedClusterInner, KubernetesClusterImpl, ContainerServiceManager> implements KubernetesCluster, KubernetesCluster.Definition, KubernetesCluster.Update { private final ClientLogger logger = new ClientLogger(getClass()); private List<CredentialResult> adminKubeConfigs; private List<CredentialResult> userKubeConfigs; private final Map<Format, List<CredentialResult>> formatUserKubeConfigsMap = new ConcurrentHashMap<>(); protected KubernetesClusterImpl(String name, ManagedClusterInner innerObject, ContainerServiceManager manager) { super(name, innerObject, manager); if (this.innerModel().agentPoolProfiles() == null) { this.innerModel().withAgentPoolProfiles(new ArrayList<>()); } this.adminKubeConfigs = null; this.userKubeConfigs = null; } @Override public String provisioningState() { return this.innerModel().provisioningState(); } @Override public String dnsPrefix() { return this.innerModel().dnsPrefix(); } @Override public String fqdn() { return this.innerModel().fqdn(); } @Override public String version() { return this.innerModel().kubernetesVersion(); } @Override public List<CredentialResult> adminKubeConfigs() { if (this.adminKubeConfigs == null || this.adminKubeConfigs.size() == 0) { this.adminKubeConfigs = this.manager().kubernetesClusters().listAdminKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.adminKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs() { if (this.userKubeConfigs == null || this.userKubeConfigs.size() == 0) { this.userKubeConfigs = this.manager().kubernetesClusters().listUserKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.userKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs(Format format) { if (format == null) { return userKubeConfigs(); } return Collections.unmodifiableList( 
this.formatUserKubeConfigsMap.computeIfAbsent(
                format,
                key -> KubernetesClusterImpl.this
                    .manager()
                    .kubernetesClusters()
                    .listUserKubeConfigContent(
                        KubernetesClusterImpl.this.resourceGroupName(),
                        KubernetesClusterImpl.this.name(),
                        format
                    ))
        );
    }

    @Override
    public byte[] adminKubeConfigContent() {
        // First admin credential's raw kubeconfig bytes; empty array when no credentials exist.
        for (CredentialResult config : adminKubeConfigs()) {
            return config.value();
        }
        return new byte[0];
    }

    @Override
    public byte[] userKubeConfigContent() {
        // First user credential's raw kubeconfig bytes; empty array when no credentials exist.
        for (CredentialResult config : userKubeConfigs()) {
            return config.value();
        }
        return new byte[0];
    }

    // FIX: the original text read "@Override @Override" at this point — a duplicated,
    // non-repeatable annotation, which is a compile error. The dangling first annotation
    // belonged to the format-aware overload, restored here (it backs the cached
    // formatUserKubeConfigsMap lookup above).
    @Override
    public byte[] userKubeConfigContent(Format format) {
        if (format == null) {
            return userKubeConfigContent();
        }
        for (CredentialResult config : userKubeConfigs(format)) {
            return config.value();
        }
        return new byte[0];
    }

    @Override
    public String servicePrincipalClientId() {
        if (this.innerModel().servicePrincipalProfile() != null) {
            return this.innerModel().servicePrincipalProfile().clientId();
        } else {
            return null;
        }
    }

    @Override
    public String servicePrincipalSecret() {
        if (this.innerModel().servicePrincipalProfile() != null) {
            return this.innerModel().servicePrincipalProfile().secret();
        } else {
            return null;
        }
    }

    @Override
    public String linuxRootUsername() {
        if (this.innerModel().linuxProfile() != null) {
            return this.innerModel().linuxProfile().adminUsername();
        } else {
            return null;
        }
    }

    @Override
    public String sshKey() {
        // Only the first configured public key is surfaced by this convenience getter.
        if (this.innerModel().linuxProfile() == null
            || this.innerModel().linuxProfile().ssh() == null
            || this.innerModel().linuxProfile().ssh().publicKeys() == null
            || this.innerModel().linuxProfile().ssh().publicKeys().size() == 0) {
            return null;
        } else {
            return this.innerModel().linuxProfile().ssh().publicKeys().get(0).keyData();
        }
    }

    @Override
    public Map<String, KubernetesClusterAgentPool> agentPools() {
        Map<String, KubernetesClusterAgentPool> agentPoolMap = new HashMap<>();
        if (this.innerModel().agentPoolProfiles() != null && this.innerModel().agentPoolProfiles().size() > 0) {
            for (ManagedClusterAgentPoolProfile agentPoolProfile : this.innerModel().agentPoolProfiles()) {
                agentPoolMap.put(agentPoolProfile.name(), new KubernetesClusterAgentPoolImpl(agentPoolProfile, this));
            }
        }
        return 
Collections.unmodifiableMap(agentPoolMap); } @Override public ContainerServiceNetworkProfile networkProfile() { return this.innerModel().networkProfile(); } @Override public Map<String, ManagedClusterAddonProfile> addonProfiles() { return Collections.unmodifiableMap(this.innerModel().addonProfiles()); } @Override public String nodeResourceGroup() { return this.innerModel().nodeResourceGroup(); } @Override public boolean enableRBAC() { return this.innerModel().enableRbac(); } @Override public PowerState powerState() { return this.innerModel().powerState(); } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { String objectId = null; if (this.innerModel().identityProfile() != null) { UserAssignedIdentity identity = this.innerModel().identityProfile().get("kubeletidentity"); if (identity != null) { objectId = identity.objectId(); } } return objectId; } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return manager().kubernetesClusters().startAsync(this.resourceGroupName(), this.name()); } @Override public void stop() { this.stopAsync().block(); } @Override public Mono<Void> stopAsync() { return manager().kubernetesClusters().stopAsync(this.resourceGroupName(), this.name()); } private Mono<List<CredentialResult>> listAdminConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listAdminKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.adminKubeConfigs = kubeConfigs; return self.adminKubeConfigs; }); } private Mono<List<CredentialResult>> listUserConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listUserKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.userKubeConfigs = kubeConfigs; return self.userKubeConfigs; }); } @Override protected Mono<ManagedClusterInner> getInnerAsync() { final KubernetesClusterImpl self = this; final 
Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .getByResourceGroupAsync(this.resourceGroupName(), this.name()) .flatMap( managedClusterInner -> Flux.merge(adminConfig, userConfig).last().map(bytes -> managedClusterInner)); } @Override public Mono<KubernetesCluster> createResourceAsync() { final KubernetesClusterImpl self = this; if (!this.isInCreateMode()) { this.innerModel().withServicePrincipalProfile(null); } final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .createOrUpdateAsync(self.resourceGroupName(), self.name(), self.innerModel()) .flatMap( inner -> Flux .merge(adminConfig, userConfig) .last() .map( bytes -> { self.setInner(inner); return self; })); } @Override public KubernetesClusterImpl withVersion(String kubernetesVersion) { this.innerModel().withKubernetesVersion(kubernetesVersion); return this; } @Override public KubernetesClusterImpl withDefaultVersion() { this.innerModel().withKubernetesVersion(""); return this; } @Override public KubernetesClusterImpl withRootUsername(String rootUserName) { if (this.innerModel().linuxProfile() == null) { this.innerModel().withLinuxProfile(new ContainerServiceLinuxProfile()); } this.innerModel().linuxProfile().withAdminUsername(rootUserName); return this; } @Override public KubernetesClusterImpl withSshKey(String sshKeyData) { this .innerModel() .linuxProfile() .withSsh( new ContainerServiceSshConfiguration().withPublicKeys(new ArrayList<ContainerServiceSshPublicKey>())); this.innerModel().linuxProfile().ssh().publicKeys().add( new ContainerServiceSshPublicKey().withKeyData(sshKeyData)); return this; } @Override public KubernetesClusterImpl withServicePrincipalClientId(String clientId) { 
this.innerModel().withServicePrincipalProfile( new ManagedClusterServicePrincipalProfile().withClientId(clientId)); return this; } @Override public KubernetesClusterImpl withSystemAssignedManagedServiceIdentity() { this.innerModel().withIdentity(new ManagedClusterIdentity().withType(ResourceIdentityType.SYSTEM_ASSIGNED)); return this; } @Override public KubernetesClusterImpl withServicePrincipalSecret(String secret) { this.innerModel().servicePrincipalProfile().withSecret(secret); return this; } @Override public KubernetesClusterImpl withDnsPrefix(String dnsPrefix) { this.innerModel().withDnsPrefix(dnsPrefix); return this; } @Override public KubernetesClusterAgentPoolImpl defineAgentPool(String name) { ManagedClusterAgentPoolProfile innerPoolProfile = new ManagedClusterAgentPoolProfile(); innerPoolProfile.withName(name); return new KubernetesClusterAgentPoolImpl(innerPoolProfile, this); } @Override public KubernetesClusterAgentPoolImpl updateAgentPool(String name) { for (ManagedClusterAgentPoolProfile agentPoolProfile : innerModel().agentPoolProfiles()) { if (agentPoolProfile.name().equals(name)) { return new KubernetesClusterAgentPoolImpl(agentPoolProfile, this); } } throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Cannot get agent pool named %s", name))); } @Override public Update withoutAgentPool(String name) { if (innerModel().agentPoolProfiles() != null) { innerModel().withAgentPoolProfiles( innerModel().agentPoolProfiles().stream() .filter(p -> !name.equals(p.name())) .collect(Collectors.toList())); this.addDependency(context -> manager().serviceClient().getAgentPools().deleteAsync(resourceGroupName(), name(), name) .then(context.voidMono())); } return this; } @Override public KubernetesCluster.DefinitionStages.NetworkProfileDefinitionStages.Blank< KubernetesCluster.DefinitionStages.WithCreate> defineNetworkProfile() { return new KubernetesClusterNetworkProfileImpl(this); } @Override public KubernetesClusterImpl 
withAddOnProfiles(Map<String, ManagedClusterAddonProfile> addOnProfileMap) { this.innerModel().withAddonProfiles(addOnProfileMap); return this; } @Override public KubernetesClusterImpl withNetworkProfile(ContainerServiceNetworkProfile networkProfile) { this.innerModel().withNetworkProfile(networkProfile); return this; } @Override public KubernetesClusterImpl withRBACEnabled() { this.innerModel().withEnableRbac(true); return this; } @Override public KubernetesClusterImpl withRBACDisabled() { this.innerModel().withEnableRbac(false); return this; } public KubernetesClusterImpl addNewAgentPool(KubernetesClusterAgentPoolImpl agentPool) { if (!isInCreateMode()) { this.addDependency(context -> manager().serviceClient().getAgentPools().createOrUpdateAsync( resourceGroupName(), name(), agentPool.name(), agentPool.getAgentPoolInner()) .then(context.voidMono())); } innerModel().agentPoolProfiles().add(agentPool.innerModel()); return this; } @Override public KubernetesClusterImpl withAutoScalerProfile(ManagedClusterPropertiesAutoScalerProfile autoScalerProfile) { this.innerModel().withAutoScalerProfile(autoScalerProfile); return this; } @Override public KubernetesClusterImpl enablePrivateCluster() { if (innerModel().apiServerAccessProfile() == null) { innerModel().withApiServerAccessProfile(new ManagedClusterApiServerAccessProfile()); } innerModel().apiServerAccessProfile().withEnablePrivateCluster(true); return this; } @Override public PagedIterable<PrivateLinkResource> listPrivateLinkResources() { return new PagedIterable<>(listPrivateLinkResourcesAsync()); } @Override public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() { Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateLinkResourceImpl::new) .collect(Collectors.toList()))); return 
PagedConverter.convertListToPagedFlux(retList); } @Override public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() { return new PagedIterable<>(listPrivateEndpointConnectionsAsync()); } @Override public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() { Mono<Response<List<PrivateEndpointConnection>>> retList = this.manager().serviceClient() .getPrivateEndpointConnections() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateEndpointConnectionImpl::new) .collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } private static final class PrivateLinkResourceImpl implements PrivateLinkResource { private final PrivateLinkResourceInner innerModel; private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) { this.innerModel = innerModel; } @Override public String groupId() { return innerModel.groupId(); } @Override public List<String> requiredMemberNames() { return Collections.unmodifiableList(innerModel.requiredMembers()); } @Override public List<String> requiredDnsZoneNames() { return Collections.emptyList(); } } private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection { private final PrivateEndpointConnectionInner innerModel; private final PrivateEndpoint privateEndpoint; private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState; private final PrivateEndpointConnectionProvisioningState provisioningState; private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) { this.innerModel = innerModel; this.privateEndpoint = innerModel.privateEndpoint() == null ? null : new PrivateEndpoint(innerModel.privateEndpoint().id()); this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null ? 
null : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState( innerModel.privateLinkServiceConnectionState().status() == null ? null : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus .fromString(innerModel.privateLinkServiceConnectionState().status().toString()), innerModel.privateLinkServiceConnectionState().description(), ""); this.provisioningState = innerModel.provisioningState() == null ? null : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString()); } @Override public String id() { return innerModel.id(); } @Override public String name() { return innerModel.name(); } @Override public String type() { return innerModel.type(); } @Override public PrivateEndpoint privateEndpoint() { return privateEndpoint; } @Override public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState() { return privateLinkServiceConnectionState; } @Override public PrivateEndpointConnectionProvisioningState provisioningState() { return provisioningState; } } }
class KubernetesClusterImpl extends GroupableResourceImpl< KubernetesCluster, ManagedClusterInner, KubernetesClusterImpl, ContainerServiceManager> implements KubernetesCluster, KubernetesCluster.Definition, KubernetesCluster.Update { private final ClientLogger logger = new ClientLogger(getClass()); private List<CredentialResult> adminKubeConfigs; private List<CredentialResult> userKubeConfigs; private final Map<Format, List<CredentialResult>> formatUserKubeConfigsMap = new ConcurrentHashMap<>(); protected KubernetesClusterImpl(String name, ManagedClusterInner innerObject, ContainerServiceManager manager) { super(name, innerObject, manager); if (this.innerModel().agentPoolProfiles() == null) { this.innerModel().withAgentPoolProfiles(new ArrayList<>()); } this.adminKubeConfigs = null; this.userKubeConfigs = null; } @Override public String provisioningState() { return this.innerModel().provisioningState(); } @Override public String dnsPrefix() { return this.innerModel().dnsPrefix(); } @Override public String fqdn() { return this.innerModel().fqdn(); } @Override public String version() { return this.innerModel().kubernetesVersion(); } @Override public List<CredentialResult> adminKubeConfigs() { if (this.adminKubeConfigs == null || this.adminKubeConfigs.size() == 0) { this.adminKubeConfigs = this.manager().kubernetesClusters().listAdminKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.adminKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs() { if (this.userKubeConfigs == null || this.userKubeConfigs.size() == 0) { this.userKubeConfigs = this.manager().kubernetesClusters().listUserKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.userKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs(Format format) { if (format == null) { return userKubeConfigs(); } return Collections.unmodifiableList( 
this.formatUserKubeConfigsMap.computeIfAbsent( format, key -> KubernetesClusterImpl.this .manager() .kubernetesClusters() .listUserKubeConfigContent( KubernetesClusterImpl.this.resourceGroupName(), KubernetesClusterImpl.this.name(), format )) ); } @Override public byte[] adminKubeConfigContent() { for (CredentialResult config : adminKubeConfigs()) { return config.value(); } return new byte[0]; } @Override public byte[] userKubeConfigContent() { for (CredentialResult config : userKubeConfigs()) { return config.value(); } return new byte[0]; } @Override @Override public String servicePrincipalClientId() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().clientId(); } else { return null; } } @Override public String servicePrincipalSecret() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().secret(); } else { return null; } } @Override public String linuxRootUsername() { if (this.innerModel().linuxProfile() != null) { return this.innerModel().linuxProfile().adminUsername(); } else { return null; } } @Override public String sshKey() { if (this.innerModel().linuxProfile() == null || this.innerModel().linuxProfile().ssh() == null || this.innerModel().linuxProfile().ssh().publicKeys() == null || this.innerModel().linuxProfile().ssh().publicKeys().size() == 0) { return null; } else { return this.innerModel().linuxProfile().ssh().publicKeys().get(0).keyData(); } } @Override public Map<String, KubernetesClusterAgentPool> agentPools() { Map<String, KubernetesClusterAgentPool> agentPoolMap = new HashMap<>(); if (this.innerModel().agentPoolProfiles() != null && this.innerModel().agentPoolProfiles().size() > 0) { for (ManagedClusterAgentPoolProfile agentPoolProfile : this.innerModel().agentPoolProfiles()) { agentPoolMap.put(agentPoolProfile.name(), new KubernetesClusterAgentPoolImpl(agentPoolProfile, this)); } } return 
Collections.unmodifiableMap(agentPoolMap); } @Override public ContainerServiceNetworkProfile networkProfile() { return this.innerModel().networkProfile(); } @Override public Map<String, ManagedClusterAddonProfile> addonProfiles() { return Collections.unmodifiableMap(this.innerModel().addonProfiles()); } @Override public String nodeResourceGroup() { return this.innerModel().nodeResourceGroup(); } @Override public boolean enableRBAC() { return this.innerModel().enableRbac(); } @Override public PowerState powerState() { return this.innerModel().powerState(); } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { String objectId = null; if (this.innerModel().identityProfile() != null) { UserAssignedIdentity identity = this.innerModel().identityProfile().get("kubeletidentity"); if (identity != null) { objectId = identity.objectId(); } } return objectId; } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return manager().kubernetesClusters().startAsync(this.resourceGroupName(), this.name()); } @Override public void stop() { this.stopAsync().block(); } @Override public Mono<Void> stopAsync() { return manager().kubernetesClusters().stopAsync(this.resourceGroupName(), this.name()); } private Mono<List<CredentialResult>> listAdminConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listAdminKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.adminKubeConfigs = kubeConfigs; return self.adminKubeConfigs; }); } private Mono<List<CredentialResult>> listUserConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listUserKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.userKubeConfigs = kubeConfigs; return self.userKubeConfigs; }); } @Override protected Mono<ManagedClusterInner> getInnerAsync() { final KubernetesClusterImpl self = this; final 
Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .getByResourceGroupAsync(this.resourceGroupName(), this.name()) .flatMap( managedClusterInner -> Flux .merge(adminConfig, userConfig) .last() .map( bytes -> { formatUserKubeConfigsMap.clear(); return managedClusterInner; })); } @Override public Mono<KubernetesCluster> createResourceAsync() { final KubernetesClusterImpl self = this; if (!this.isInCreateMode()) { this.innerModel().withServicePrincipalProfile(null); } final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .createOrUpdateAsync(self.resourceGroupName(), self.name(), self.innerModel()) .flatMap( inner -> Flux .merge(adminConfig, userConfig) .last() .map( bytes -> { self.setInner(inner); return self; })); } @Override public KubernetesClusterImpl withVersion(String kubernetesVersion) { this.innerModel().withKubernetesVersion(kubernetesVersion); return this; } @Override public KubernetesClusterImpl withDefaultVersion() { this.innerModel().withKubernetesVersion(""); return this; } @Override public KubernetesClusterImpl withRootUsername(String rootUserName) { if (this.innerModel().linuxProfile() == null) { this.innerModel().withLinuxProfile(new ContainerServiceLinuxProfile()); } this.innerModel().linuxProfile().withAdminUsername(rootUserName); return this; } @Override public KubernetesClusterImpl withSshKey(String sshKeyData) { this .innerModel() .linuxProfile() .withSsh( new ContainerServiceSshConfiguration().withPublicKeys(new ArrayList<ContainerServiceSshPublicKey>())); this.innerModel().linuxProfile().ssh().publicKeys().add( new ContainerServiceSshPublicKey().withKeyData(sshKeyData)); return this; } @Override public KubernetesClusterImpl 
withServicePrincipalClientId(String clientId) { this.innerModel().withServicePrincipalProfile( new ManagedClusterServicePrincipalProfile().withClientId(clientId)); return this; } @Override public KubernetesClusterImpl withSystemAssignedManagedServiceIdentity() { this.innerModel().withIdentity(new ManagedClusterIdentity().withType(ResourceIdentityType.SYSTEM_ASSIGNED)); return this; } @Override public KubernetesClusterImpl withServicePrincipalSecret(String secret) { this.innerModel().servicePrincipalProfile().withSecret(secret); return this; } @Override public KubernetesClusterImpl withDnsPrefix(String dnsPrefix) { this.innerModel().withDnsPrefix(dnsPrefix); return this; } @Override public KubernetesClusterAgentPoolImpl defineAgentPool(String name) { ManagedClusterAgentPoolProfile innerPoolProfile = new ManagedClusterAgentPoolProfile(); innerPoolProfile.withName(name); return new KubernetesClusterAgentPoolImpl(innerPoolProfile, this); } @Override public KubernetesClusterAgentPoolImpl updateAgentPool(String name) { for (ManagedClusterAgentPoolProfile agentPoolProfile : innerModel().agentPoolProfiles()) { if (agentPoolProfile.name().equals(name)) { return new KubernetesClusterAgentPoolImpl(agentPoolProfile, this); } } throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Cannot get agent pool named %s", name))); } @Override public Update withoutAgentPool(String name) { if (innerModel().agentPoolProfiles() != null) { innerModel().withAgentPoolProfiles( innerModel().agentPoolProfiles().stream() .filter(p -> !name.equals(p.name())) .collect(Collectors.toList())); this.addDependency(context -> manager().serviceClient().getAgentPools().deleteAsync(resourceGroupName(), name(), name) .then(context.voidMono())); } return this; } @Override public KubernetesCluster.DefinitionStages.NetworkProfileDefinitionStages.Blank< KubernetesCluster.DefinitionStages.WithCreate> defineNetworkProfile() { return new KubernetesClusterNetworkProfileImpl(this); } @Override 
public KubernetesClusterImpl withAddOnProfiles(Map<String, ManagedClusterAddonProfile> addOnProfileMap) { this.innerModel().withAddonProfiles(addOnProfileMap); return this; } @Override public KubernetesClusterImpl withNetworkProfile(ContainerServiceNetworkProfile networkProfile) { this.innerModel().withNetworkProfile(networkProfile); return this; } @Override public KubernetesClusterImpl withRBACEnabled() { this.innerModel().withEnableRbac(true); return this; } @Override public KubernetesClusterImpl withRBACDisabled() { this.innerModel().withEnableRbac(false); return this; } public KubernetesClusterImpl addNewAgentPool(KubernetesClusterAgentPoolImpl agentPool) { if (!isInCreateMode()) { this.addDependency(context -> manager().serviceClient().getAgentPools().createOrUpdateAsync( resourceGroupName(), name(), agentPool.name(), agentPool.getAgentPoolInner()) .then(context.voidMono())); } innerModel().agentPoolProfiles().add(agentPool.innerModel()); return this; } @Override public KubernetesClusterImpl withAutoScalerProfile(ManagedClusterPropertiesAutoScalerProfile autoScalerProfile) { this.innerModel().withAutoScalerProfile(autoScalerProfile); return this; } @Override public KubernetesClusterImpl enablePrivateCluster() { if (innerModel().apiServerAccessProfile() == null) { innerModel().withApiServerAccessProfile(new ManagedClusterApiServerAccessProfile()); } innerModel().apiServerAccessProfile().withEnablePrivateCluster(true); return this; } @Override public PagedIterable<PrivateLinkResource> listPrivateLinkResources() { return new PagedIterable<>(listPrivateLinkResourcesAsync()); } @Override public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() { Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateLinkResourceImpl::new) 
.collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } @Override public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() { return new PagedIterable<>(listPrivateEndpointConnectionsAsync()); } @Override public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() { Mono<Response<List<PrivateEndpointConnection>>> retList = this.manager().serviceClient() .getPrivateEndpointConnections() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateEndpointConnectionImpl::new) .collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } private static final class PrivateLinkResourceImpl implements PrivateLinkResource { private final PrivateLinkResourceInner innerModel; private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) { this.innerModel = innerModel; } @Override public String groupId() { return innerModel.groupId(); } @Override public List<String> requiredMemberNames() { return Collections.unmodifiableList(innerModel.requiredMembers()); } @Override public List<String> requiredDnsZoneNames() { return Collections.emptyList(); } } private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection { private final PrivateEndpointConnectionInner innerModel; private final PrivateEndpoint privateEndpoint; private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState; private final PrivateEndpointConnectionProvisioningState provisioningState; private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) { this.innerModel = innerModel; this.privateEndpoint = innerModel.privateEndpoint() == null ? 
null : new PrivateEndpoint(innerModel.privateEndpoint().id()); this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null ? null : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState( innerModel.privateLinkServiceConnectionState().status() == null ? null : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus .fromString(innerModel.privateLinkServiceConnectionState().status().toString()), innerModel.privateLinkServiceConnectionState().description(), ""); this.provisioningState = innerModel.provisioningState() == null ? null : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString()); } @Override public String id() { return innerModel.id(); } @Override public String name() { return innerModel.name(); } @Override public String type() { return innerModel.type(); } @Override public PrivateEndpoint privateEndpoint() { return privateEndpoint; } @Override public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState() { return privateLinkServiceConnectionState; } @Override public PrivateEndpointConnectionProvisioningState provisioningState() { return provisioningState; } } }
So how does your test case work?
public byte[] userKubeConfigContent(Format format) { if (format == null) { return userKubeConfigContent(); } for (CredentialResult config : userKubeConfigs(format)) { return config.value(); } return new byte[0]; }
}
public byte[] userKubeConfigContent(Format format) { if (format == null) { return userKubeConfigContent(); } for (CredentialResult config : userKubeConfigs(format)) { return config.value(); } return new byte[0]; }
class KubernetesClusterImpl extends GroupableResourceImpl< KubernetesCluster, ManagedClusterInner, KubernetesClusterImpl, ContainerServiceManager> implements KubernetesCluster, KubernetesCluster.Definition, KubernetesCluster.Update { private final ClientLogger logger = new ClientLogger(getClass()); private List<CredentialResult> adminKubeConfigs; private List<CredentialResult> userKubeConfigs; private final Map<Format, List<CredentialResult>> formatUserKubeConfigsMap = new ConcurrentHashMap<>(); protected KubernetesClusterImpl(String name, ManagedClusterInner innerObject, ContainerServiceManager manager) { super(name, innerObject, manager); if (this.innerModel().agentPoolProfiles() == null) { this.innerModel().withAgentPoolProfiles(new ArrayList<>()); } this.adminKubeConfigs = null; this.userKubeConfigs = null; } @Override public String provisioningState() { return this.innerModel().provisioningState(); } @Override public String dnsPrefix() { return this.innerModel().dnsPrefix(); } @Override public String fqdn() { return this.innerModel().fqdn(); } @Override public String version() { return this.innerModel().kubernetesVersion(); } @Override public List<CredentialResult> adminKubeConfigs() { if (this.adminKubeConfigs == null || this.adminKubeConfigs.size() == 0) { this.adminKubeConfigs = this.manager().kubernetesClusters().listAdminKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.adminKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs() { if (this.userKubeConfigs == null || this.userKubeConfigs.size() == 0) { this.userKubeConfigs = this.manager().kubernetesClusters().listUserKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.userKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs(Format format) { if (format == null) { return userKubeConfigs(); } return Collections.unmodifiableList( 
this.formatUserKubeConfigsMap.computeIfAbsent( format, key -> KubernetesClusterImpl.this .manager() .kubernetesClusters() .listUserKubeConfigContent( KubernetesClusterImpl.this.resourceGroupName(), KubernetesClusterImpl.this.name(), format )) ); } @Override public byte[] adminKubeConfigContent() { for (CredentialResult config : adminKubeConfigs()) { return config.value(); } return new byte[0]; } @Override public byte[] userKubeConfigContent() { for (CredentialResult config : userKubeConfigs()) { return config.value(); } return new byte[0]; } @Override @Override public String servicePrincipalClientId() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().clientId(); } else { return null; } } @Override public String servicePrincipalSecret() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().secret(); } else { return null; } } @Override public String linuxRootUsername() { if (this.innerModel().linuxProfile() != null) { return this.innerModel().linuxProfile().adminUsername(); } else { return null; } } @Override public String sshKey() { if (this.innerModel().linuxProfile() == null || this.innerModel().linuxProfile().ssh() == null || this.innerModel().linuxProfile().ssh().publicKeys() == null || this.innerModel().linuxProfile().ssh().publicKeys().size() == 0) { return null; } else { return this.innerModel().linuxProfile().ssh().publicKeys().get(0).keyData(); } } @Override public Map<String, KubernetesClusterAgentPool> agentPools() { Map<String, KubernetesClusterAgentPool> agentPoolMap = new HashMap<>(); if (this.innerModel().agentPoolProfiles() != null && this.innerModel().agentPoolProfiles().size() > 0) { for (ManagedClusterAgentPoolProfile agentPoolProfile : this.innerModel().agentPoolProfiles()) { agentPoolMap.put(agentPoolProfile.name(), new KubernetesClusterAgentPoolImpl(agentPoolProfile, this)); } } return 
Collections.unmodifiableMap(agentPoolMap); } @Override public ContainerServiceNetworkProfile networkProfile() { return this.innerModel().networkProfile(); } @Override public Map<String, ManagedClusterAddonProfile> addonProfiles() { return Collections.unmodifiableMap(this.innerModel().addonProfiles()); } @Override public String nodeResourceGroup() { return this.innerModel().nodeResourceGroup(); } @Override public boolean enableRBAC() { return this.innerModel().enableRbac(); } @Override public PowerState powerState() { return this.innerModel().powerState(); } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { String objectId = null; if (this.innerModel().identityProfile() != null) { UserAssignedIdentity identity = this.innerModel().identityProfile().get("kubeletidentity"); if (identity != null) { objectId = identity.objectId(); } } return objectId; } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return manager().kubernetesClusters().startAsync(this.resourceGroupName(), this.name()); } @Override public void stop() { this.stopAsync().block(); } @Override public Mono<Void> stopAsync() { return manager().kubernetesClusters().stopAsync(this.resourceGroupName(), this.name()); } private Mono<List<CredentialResult>> listAdminConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listAdminKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.adminKubeConfigs = kubeConfigs; return self.adminKubeConfigs; }); } private Mono<List<CredentialResult>> listUserConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listUserKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.userKubeConfigs = kubeConfigs; return self.userKubeConfigs; }); } @Override protected Mono<ManagedClusterInner> getInnerAsync() { final KubernetesClusterImpl self = this; final 
Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .getByResourceGroupAsync(this.resourceGroupName(), this.name()) .flatMap( managedClusterInner -> Flux.merge(adminConfig, userConfig).last().map(bytes -> managedClusterInner)); } @Override public Mono<KubernetesCluster> createResourceAsync() { final KubernetesClusterImpl self = this; if (!this.isInCreateMode()) { this.innerModel().withServicePrincipalProfile(null); } final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .createOrUpdateAsync(self.resourceGroupName(), self.name(), self.innerModel()) .flatMap( inner -> Flux .merge(adminConfig, userConfig) .last() .map( bytes -> { self.setInner(inner); return self; })); } @Override public KubernetesClusterImpl withVersion(String kubernetesVersion) { this.innerModel().withKubernetesVersion(kubernetesVersion); return this; } @Override public KubernetesClusterImpl withDefaultVersion() { this.innerModel().withKubernetesVersion(""); return this; } @Override public KubernetesClusterImpl withRootUsername(String rootUserName) { if (this.innerModel().linuxProfile() == null) { this.innerModel().withLinuxProfile(new ContainerServiceLinuxProfile()); } this.innerModel().linuxProfile().withAdminUsername(rootUserName); return this; } @Override public KubernetesClusterImpl withSshKey(String sshKeyData) { this .innerModel() .linuxProfile() .withSsh( new ContainerServiceSshConfiguration().withPublicKeys(new ArrayList<ContainerServiceSshPublicKey>())); this.innerModel().linuxProfile().ssh().publicKeys().add( new ContainerServiceSshPublicKey().withKeyData(sshKeyData)); return this; } @Override public KubernetesClusterImpl withServicePrincipalClientId(String clientId) { 
this.innerModel().withServicePrincipalProfile( new ManagedClusterServicePrincipalProfile().withClientId(clientId)); return this; } @Override public KubernetesClusterImpl withSystemAssignedManagedServiceIdentity() { this.innerModel().withIdentity(new ManagedClusterIdentity().withType(ResourceIdentityType.SYSTEM_ASSIGNED)); return this; } @Override public KubernetesClusterImpl withServicePrincipalSecret(String secret) { this.innerModel().servicePrincipalProfile().withSecret(secret); return this; } @Override public KubernetesClusterImpl withDnsPrefix(String dnsPrefix) { this.innerModel().withDnsPrefix(dnsPrefix); return this; } @Override public KubernetesClusterAgentPoolImpl defineAgentPool(String name) { ManagedClusterAgentPoolProfile innerPoolProfile = new ManagedClusterAgentPoolProfile(); innerPoolProfile.withName(name); return new KubernetesClusterAgentPoolImpl(innerPoolProfile, this); } @Override public KubernetesClusterAgentPoolImpl updateAgentPool(String name) { for (ManagedClusterAgentPoolProfile agentPoolProfile : innerModel().agentPoolProfiles()) { if (agentPoolProfile.name().equals(name)) { return new KubernetesClusterAgentPoolImpl(agentPoolProfile, this); } } throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Cannot get agent pool named %s", name))); } @Override public Update withoutAgentPool(String name) { if (innerModel().agentPoolProfiles() != null) { innerModel().withAgentPoolProfiles( innerModel().agentPoolProfiles().stream() .filter(p -> !name.equals(p.name())) .collect(Collectors.toList())); this.addDependency(context -> manager().serviceClient().getAgentPools().deleteAsync(resourceGroupName(), name(), name) .then(context.voidMono())); } return this; } @Override public KubernetesCluster.DefinitionStages.NetworkProfileDefinitionStages.Blank< KubernetesCluster.DefinitionStages.WithCreate> defineNetworkProfile() { return new KubernetesClusterNetworkProfileImpl(this); } @Override public KubernetesClusterImpl 
withAddOnProfiles(Map<String, ManagedClusterAddonProfile> addOnProfileMap) { this.innerModel().withAddonProfiles(addOnProfileMap); return this; } @Override public KubernetesClusterImpl withNetworkProfile(ContainerServiceNetworkProfile networkProfile) { this.innerModel().withNetworkProfile(networkProfile); return this; } @Override public KubernetesClusterImpl withRBACEnabled() { this.innerModel().withEnableRbac(true); return this; } @Override public KubernetesClusterImpl withRBACDisabled() { this.innerModel().withEnableRbac(false); return this; } public KubernetesClusterImpl addNewAgentPool(KubernetesClusterAgentPoolImpl agentPool) { if (!isInCreateMode()) { this.addDependency(context -> manager().serviceClient().getAgentPools().createOrUpdateAsync( resourceGroupName(), name(), agentPool.name(), agentPool.getAgentPoolInner()) .then(context.voidMono())); } innerModel().agentPoolProfiles().add(agentPool.innerModel()); return this; } @Override public KubernetesClusterImpl withAutoScalerProfile(ManagedClusterPropertiesAutoScalerProfile autoScalerProfile) { this.innerModel().withAutoScalerProfile(autoScalerProfile); return this; } @Override public KubernetesClusterImpl enablePrivateCluster() { if (innerModel().apiServerAccessProfile() == null) { innerModel().withApiServerAccessProfile(new ManagedClusterApiServerAccessProfile()); } innerModel().apiServerAccessProfile().withEnablePrivateCluster(true); return this; } @Override public PagedIterable<PrivateLinkResource> listPrivateLinkResources() { return new PagedIterable<>(listPrivateLinkResourcesAsync()); } @Override public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() { Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateLinkResourceImpl::new) .collect(Collectors.toList()))); return 
PagedConverter.convertListToPagedFlux(retList); } @Override public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() { return new PagedIterable<>(listPrivateEndpointConnectionsAsync()); } @Override public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() { Mono<Response<List<PrivateEndpointConnection>>> retList = this.manager().serviceClient() .getPrivateEndpointConnections() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateEndpointConnectionImpl::new) .collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } private static final class PrivateLinkResourceImpl implements PrivateLinkResource { private final PrivateLinkResourceInner innerModel; private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) { this.innerModel = innerModel; } @Override public String groupId() { return innerModel.groupId(); } @Override public List<String> requiredMemberNames() { return Collections.unmodifiableList(innerModel.requiredMembers()); } @Override public List<String> requiredDnsZoneNames() { return Collections.emptyList(); } } private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection { private final PrivateEndpointConnectionInner innerModel; private final PrivateEndpoint privateEndpoint; private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState; private final PrivateEndpointConnectionProvisioningState provisioningState; private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) { this.innerModel = innerModel; this.privateEndpoint = innerModel.privateEndpoint() == null ? null : new PrivateEndpoint(innerModel.privateEndpoint().id()); this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null ? 
null : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState( innerModel.privateLinkServiceConnectionState().status() == null ? null : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus .fromString(innerModel.privateLinkServiceConnectionState().status().toString()), innerModel.privateLinkServiceConnectionState().description(), ""); this.provisioningState = innerModel.provisioningState() == null ? null : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString()); } @Override public String id() { return innerModel.id(); } @Override public String name() { return innerModel.name(); } @Override public String type() { return innerModel.type(); } @Override public PrivateEndpoint privateEndpoint() { return privateEndpoint; } @Override public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState() { return privateLinkServiceConnectionState; } @Override public PrivateEndpointConnectionProvisioningState provisioningState() { return provisioningState; } } }
/**
 * Implementation for {@code KubernetesCluster} and its definition/update flows.
 * Lazily caches admin and user kubeconfig credentials; per-{@code Format} user
 * kubeconfigs are cached in a {@code ConcurrentHashMap} and invalidated on refresh.
 * NOTE(review): the {@code userKubeConfigContent(Format)} overload had been dropped,
 * leaving a duplicated {@code @Override} annotation (a compile error) — restored here.
 */
class KubernetesClusterImpl extends GroupableResourceImpl< KubernetesCluster, ManagedClusterInner, KubernetesClusterImpl, ContainerServiceManager> implements KubernetesCluster, KubernetesCluster.Definition, KubernetesCluster.Update { private final ClientLogger logger = new ClientLogger(getClass()); private List<CredentialResult> adminKubeConfigs; private List<CredentialResult> userKubeConfigs; private final Map<Format, List<CredentialResult>> formatUserKubeConfigsMap = new ConcurrentHashMap<>(); protected KubernetesClusterImpl(String name, ManagedClusterInner innerObject, ContainerServiceManager manager) { super(name, innerObject, manager); if (this.innerModel().agentPoolProfiles() == null) { this.innerModel().withAgentPoolProfiles(new ArrayList<>()); } this.adminKubeConfigs = null; this.userKubeConfigs = null; } @Override public String provisioningState() { return this.innerModel().provisioningState(); } @Override public String dnsPrefix() { return this.innerModel().dnsPrefix(); } @Override public String fqdn() { return this.innerModel().fqdn(); } @Override public String version() { return this.innerModel().kubernetesVersion(); } @Override public List<CredentialResult> adminKubeConfigs() { if (this.adminKubeConfigs == null || this.adminKubeConfigs.size() == 0) { this.adminKubeConfigs = this.manager().kubernetesClusters().listAdminKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.adminKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs() { if (this.userKubeConfigs == null || this.userKubeConfigs.size() == 0) { this.userKubeConfigs = this.manager().kubernetesClusters().listUserKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.userKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs(Format format) { if (format == null) { return userKubeConfigs(); } return Collections.unmodifiableList(
this.formatUserKubeConfigsMap.computeIfAbsent( format, key -> KubernetesClusterImpl.this .manager() .kubernetesClusters() .listUserKubeConfigContent( KubernetesClusterImpl.this.resourceGroupName(), KubernetesClusterImpl.this.name(), format )) ); } @Override public byte[] adminKubeConfigContent() { for (CredentialResult config : adminKubeConfigs()) { return config.value(); } return new byte[0]; } @Override public byte[] userKubeConfigContent() { for (CredentialResult config : userKubeConfigs()) { return config.value(); } return new byte[0]; } @Override public byte[] userKubeConfigContent(Format format) { if (format == null) { return userKubeConfigContent(); } for (CredentialResult config : userKubeConfigs(format)) { return config.value(); } return new byte[0]; } @Override public String servicePrincipalClientId() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().clientId(); } else { return null; } } @Override public String servicePrincipalSecret() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().secret(); } else { return null; } } @Override public String linuxRootUsername() { if (this.innerModel().linuxProfile() != null) { return this.innerModel().linuxProfile().adminUsername(); } else { return null; } } @Override public String sshKey() { if (this.innerModel().linuxProfile() == null || this.innerModel().linuxProfile().ssh() == null || this.innerModel().linuxProfile().ssh().publicKeys() == null || this.innerModel().linuxProfile().ssh().publicKeys().size() == 0) { return null; } else { return this.innerModel().linuxProfile().ssh().publicKeys().get(0).keyData(); } } @Override public Map<String, KubernetesClusterAgentPool> agentPools() { Map<String, KubernetesClusterAgentPool> agentPoolMap = new HashMap<>(); if (this.innerModel().agentPoolProfiles() != null && this.innerModel().agentPoolProfiles().size() > 0) { for (ManagedClusterAgentPoolProfile agentPoolProfile : this.innerModel().agentPoolProfiles()) { agentPoolMap.put(agentPoolProfile.name(), new KubernetesClusterAgentPoolImpl(agentPoolProfile, this)); } } return
Collections.unmodifiableMap(agentPoolMap); } @Override public ContainerServiceNetworkProfile networkProfile() { return this.innerModel().networkProfile(); } @Override public Map<String, ManagedClusterAddonProfile> addonProfiles() { return Collections.unmodifiableMap(this.innerModel().addonProfiles()); } @Override public String nodeResourceGroup() { return this.innerModel().nodeResourceGroup(); } @Override public boolean enableRBAC() { return this.innerModel().enableRbac(); } @Override public PowerState powerState() { return this.innerModel().powerState(); } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { String objectId = null; if (this.innerModel().identityProfile() != null) { UserAssignedIdentity identity = this.innerModel().identityProfile().get("kubeletidentity"); if (identity != null) { objectId = identity.objectId(); } } return objectId; } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return manager().kubernetesClusters().startAsync(this.resourceGroupName(), this.name()); } @Override public void stop() { this.stopAsync().block(); } @Override public Mono<Void> stopAsync() { return manager().kubernetesClusters().stopAsync(this.resourceGroupName(), this.name()); } private Mono<List<CredentialResult>> listAdminConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listAdminKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.adminKubeConfigs = kubeConfigs; return self.adminKubeConfigs; }); } private Mono<List<CredentialResult>> listUserConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listUserKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.userKubeConfigs = kubeConfigs; return self.userKubeConfigs; }); } @Override protected Mono<ManagedClusterInner> getInnerAsync() { final KubernetesClusterImpl self = this; final
Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .getByResourceGroupAsync(this.resourceGroupName(), this.name()) .flatMap( managedClusterInner -> Flux .merge(adminConfig, userConfig) .last() .map( bytes -> { formatUserKubeConfigsMap.clear(); return managedClusterInner; })); } @Override public Mono<KubernetesCluster> createResourceAsync() { final KubernetesClusterImpl self = this; if (!this.isInCreateMode()) { this.innerModel().withServicePrincipalProfile(null); } final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .createOrUpdateAsync(self.resourceGroupName(), self.name(), self.innerModel()) .flatMap( inner -> Flux .merge(adminConfig, userConfig) .last() .map( bytes -> { self.setInner(inner); return self; })); } @Override public KubernetesClusterImpl withVersion(String kubernetesVersion) { this.innerModel().withKubernetesVersion(kubernetesVersion); return this; } @Override public KubernetesClusterImpl withDefaultVersion() { this.innerModel().withKubernetesVersion(""); return this; } @Override public KubernetesClusterImpl withRootUsername(String rootUserName) { if (this.innerModel().linuxProfile() == null) { this.innerModel().withLinuxProfile(new ContainerServiceLinuxProfile()); } this.innerModel().linuxProfile().withAdminUsername(rootUserName); return this; } @Override public KubernetesClusterImpl withSshKey(String sshKeyData) { this .innerModel() .linuxProfile() .withSsh( new ContainerServiceSshConfiguration().withPublicKeys(new ArrayList<ContainerServiceSshPublicKey>())); this.innerModel().linuxProfile().ssh().publicKeys().add( new ContainerServiceSshPublicKey().withKeyData(sshKeyData)); return this; } @Override public KubernetesClusterImpl
withServicePrincipalClientId(String clientId) { this.innerModel().withServicePrincipalProfile( new ManagedClusterServicePrincipalProfile().withClientId(clientId)); return this; } @Override public KubernetesClusterImpl withSystemAssignedManagedServiceIdentity() { this.innerModel().withIdentity(new ManagedClusterIdentity().withType(ResourceIdentityType.SYSTEM_ASSIGNED)); return this; } @Override public KubernetesClusterImpl withServicePrincipalSecret(String secret) { this.innerModel().servicePrincipalProfile().withSecret(secret); return this; } @Override public KubernetesClusterImpl withDnsPrefix(String dnsPrefix) { this.innerModel().withDnsPrefix(dnsPrefix); return this; } @Override public KubernetesClusterAgentPoolImpl defineAgentPool(String name) { ManagedClusterAgentPoolProfile innerPoolProfile = new ManagedClusterAgentPoolProfile(); innerPoolProfile.withName(name); return new KubernetesClusterAgentPoolImpl(innerPoolProfile, this); } @Override public KubernetesClusterAgentPoolImpl updateAgentPool(String name) { for (ManagedClusterAgentPoolProfile agentPoolProfile : innerModel().agentPoolProfiles()) { if (agentPoolProfile.name().equals(name)) { return new KubernetesClusterAgentPoolImpl(agentPoolProfile, this); } } throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Cannot get agent pool named %s", name))); } @Override public Update withoutAgentPool(String name) { if (innerModel().agentPoolProfiles() != null) { innerModel().withAgentPoolProfiles( innerModel().agentPoolProfiles().stream() .filter(p -> !name.equals(p.name())) .collect(Collectors.toList())); this.addDependency(context -> manager().serviceClient().getAgentPools().deleteAsync(resourceGroupName(), name(), name) .then(context.voidMono())); } return this; } @Override public KubernetesCluster.DefinitionStages.NetworkProfileDefinitionStages.Blank< KubernetesCluster.DefinitionStages.WithCreate> defineNetworkProfile() { return new KubernetesClusterNetworkProfileImpl(this); } @Override
public KubernetesClusterImpl withAddOnProfiles(Map<String, ManagedClusterAddonProfile> addOnProfileMap) { this.innerModel().withAddonProfiles(addOnProfileMap); return this; } @Override public KubernetesClusterImpl withNetworkProfile(ContainerServiceNetworkProfile networkProfile) { this.innerModel().withNetworkProfile(networkProfile); return this; } @Override public KubernetesClusterImpl withRBACEnabled() { this.innerModel().withEnableRbac(true); return this; } @Override public KubernetesClusterImpl withRBACDisabled() { this.innerModel().withEnableRbac(false); return this; } public KubernetesClusterImpl addNewAgentPool(KubernetesClusterAgentPoolImpl agentPool) { if (!isInCreateMode()) { this.addDependency(context -> manager().serviceClient().getAgentPools().createOrUpdateAsync( resourceGroupName(), name(), agentPool.name(), agentPool.getAgentPoolInner()) .then(context.voidMono())); } innerModel().agentPoolProfiles().add(agentPool.innerModel()); return this; } @Override public KubernetesClusterImpl withAutoScalerProfile(ManagedClusterPropertiesAutoScalerProfile autoScalerProfile) { this.innerModel().withAutoScalerProfile(autoScalerProfile); return this; } @Override public KubernetesClusterImpl enablePrivateCluster() { if (innerModel().apiServerAccessProfile() == null) { innerModel().withApiServerAccessProfile(new ManagedClusterApiServerAccessProfile()); } innerModel().apiServerAccessProfile().withEnablePrivateCluster(true); return this; } @Override public PagedIterable<PrivateLinkResource> listPrivateLinkResources() { return new PagedIterable<>(listPrivateLinkResourcesAsync()); } @Override public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() { Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } @Override public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() { return new PagedIterable<>(listPrivateEndpointConnectionsAsync()); } @Override public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() { Mono<Response<List<PrivateEndpointConnection>>> retList = this.manager().serviceClient() .getPrivateEndpointConnections() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateEndpointConnectionImpl::new) .collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } private static final class PrivateLinkResourceImpl implements PrivateLinkResource { private final PrivateLinkResourceInner innerModel; private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) { this.innerModel = innerModel; } @Override public String groupId() { return innerModel.groupId(); } @Override public List<String> requiredMemberNames() { return Collections.unmodifiableList(innerModel.requiredMembers()); } @Override public List<String> requiredDnsZoneNames() { return Collections.emptyList(); } } private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection { private final PrivateEndpointConnectionInner innerModel; private final PrivateEndpoint privateEndpoint; private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState; private final PrivateEndpointConnectionProvisioningState provisioningState; private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) { this.innerModel = innerModel; this.privateEndpoint = innerModel.privateEndpoint() == null ? null : new PrivateEndpoint(innerModel.privateEndpoint().id()); this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null ?
null : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState( innerModel.privateLinkServiceConnectionState().status() == null ? null : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus .fromString(innerModel.privateLinkServiceConnectionState().status().toString()), innerModel.privateLinkServiceConnectionState().description(), ""); this.provisioningState = innerModel.provisioningState() == null ? null : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString()); } @Override public String id() { return innerModel.id(); } @Override public String name() { return innerModel.name(); } @Override public String type() { return innerModel.type(); } @Override public PrivateEndpoint privateEndpoint() { return privateEndpoint; } @Override public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState() { return privateLinkServiceConnectionState; } @Override public PrivateEndpointConnectionProvisioningState provisioningState() { return provisioningState; } } }
It works against normal clusters, and they seem to ignore this format. For AAD-enabled clusters, the conversion can only be done locally for now, and the cluster will recognize the `format`.
/**
 * Returns the first user kubeconfig credential's raw content for the requested
 * credential {@code format}. A null {@code format} delegates to the no-argument
 * overload; an empty byte array is returned when no credential is available.
 */
public byte[] userKubeConfigContent(Format format) {
    if (format == null) {
        return userKubeConfigContent();
    }
    List<CredentialResult> credentials = userKubeConfigs(format);
    return credentials.isEmpty() ? new byte[0] : credentials.get(0).value();
}
}
/**
 * Returns the raw content of the first user kubeconfig credential, rendered in
 * the requested {@code format}; falls back to the default-format overload when
 * {@code format} is null, and to an empty byte array when nothing is available.
 */
public byte[] userKubeConfigContent(Format format) {
    if (format != null) {
        for (CredentialResult credential : userKubeConfigs(format)) {
            // First credential wins, mirroring the no-format overload.
            return credential.value();
        }
        return new byte[0];
    }
    return userKubeConfigContent();
}
/**
 * Implementation for {@code KubernetesCluster} and its definition/update flows.
 * Lazily caches admin and user kubeconfig credentials; per-{@code Format} user
 * kubeconfigs are cached in a {@code ConcurrentHashMap}.
 * NOTE(review): the {@code userKubeConfigContent(Format)} overload had been dropped,
 * leaving a duplicated {@code @Override} annotation (a compile error) — restored here.
 * Also, {@code getInnerAsync} now clears the per-format kubeconfig cache on refresh so
 * stale credentials are not served after a reload.
 */
class KubernetesClusterImpl extends GroupableResourceImpl< KubernetesCluster, ManagedClusterInner, KubernetesClusterImpl, ContainerServiceManager> implements KubernetesCluster, KubernetesCluster.Definition, KubernetesCluster.Update { private final ClientLogger logger = new ClientLogger(getClass()); private List<CredentialResult> adminKubeConfigs; private List<CredentialResult> userKubeConfigs; private final Map<Format, List<CredentialResult>> formatUserKubeConfigsMap = new ConcurrentHashMap<>(); protected KubernetesClusterImpl(String name, ManagedClusterInner innerObject, ContainerServiceManager manager) { super(name, innerObject, manager); if (this.innerModel().agentPoolProfiles() == null) { this.innerModel().withAgentPoolProfiles(new ArrayList<>()); } this.adminKubeConfigs = null; this.userKubeConfigs = null; } @Override public String provisioningState() { return this.innerModel().provisioningState(); } @Override public String dnsPrefix() { return this.innerModel().dnsPrefix(); } @Override public String fqdn() { return this.innerModel().fqdn(); } @Override public String version() { return this.innerModel().kubernetesVersion(); } @Override public List<CredentialResult> adminKubeConfigs() { if (this.adminKubeConfigs == null || this.adminKubeConfigs.size() == 0) { this.adminKubeConfigs = this.manager().kubernetesClusters().listAdminKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.adminKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs() { if (this.userKubeConfigs == null || this.userKubeConfigs.size() == 0) { this.userKubeConfigs = this.manager().kubernetesClusters().listUserKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.userKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs(Format format) { if (format == null) { return userKubeConfigs(); } return Collections.unmodifiableList(
this.formatUserKubeConfigsMap.computeIfAbsent( format, key -> KubernetesClusterImpl.this .manager() .kubernetesClusters() .listUserKubeConfigContent( KubernetesClusterImpl.this.resourceGroupName(), KubernetesClusterImpl.this.name(), format )) ); } @Override public byte[] adminKubeConfigContent() { for (CredentialResult config : adminKubeConfigs()) { return config.value(); } return new byte[0]; } @Override public byte[] userKubeConfigContent() { for (CredentialResult config : userKubeConfigs()) { return config.value(); } return new byte[0]; } @Override public byte[] userKubeConfigContent(Format format) { if (format == null) { return userKubeConfigContent(); } for (CredentialResult config : userKubeConfigs(format)) { return config.value(); } return new byte[0]; } @Override public String servicePrincipalClientId() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().clientId(); } else { return null; } } @Override public String servicePrincipalSecret() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().secret(); } else { return null; } } @Override public String linuxRootUsername() { if (this.innerModel().linuxProfile() != null) { return this.innerModel().linuxProfile().adminUsername(); } else { return null; } } @Override public String sshKey() { if (this.innerModel().linuxProfile() == null || this.innerModel().linuxProfile().ssh() == null || this.innerModel().linuxProfile().ssh().publicKeys() == null || this.innerModel().linuxProfile().ssh().publicKeys().size() == 0) { return null; } else { return this.innerModel().linuxProfile().ssh().publicKeys().get(0).keyData(); } } @Override public Map<String, KubernetesClusterAgentPool> agentPools() { Map<String, KubernetesClusterAgentPool> agentPoolMap = new HashMap<>(); if (this.innerModel().agentPoolProfiles() != null && this.innerModel().agentPoolProfiles().size() > 0) { for (ManagedClusterAgentPoolProfile agentPoolProfile : this.innerModel().agentPoolProfiles()) { agentPoolMap.put(agentPoolProfile.name(), new KubernetesClusterAgentPoolImpl(agentPoolProfile, this)); } } return
Collections.unmodifiableMap(agentPoolMap); } @Override public ContainerServiceNetworkProfile networkProfile() { return this.innerModel().networkProfile(); } @Override public Map<String, ManagedClusterAddonProfile> addonProfiles() { return Collections.unmodifiableMap(this.innerModel().addonProfiles()); } @Override public String nodeResourceGroup() { return this.innerModel().nodeResourceGroup(); } @Override public boolean enableRBAC() { return this.innerModel().enableRbac(); } @Override public PowerState powerState() { return this.innerModel().powerState(); } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { String objectId = null; if (this.innerModel().identityProfile() != null) { UserAssignedIdentity identity = this.innerModel().identityProfile().get("kubeletidentity"); if (identity != null) { objectId = identity.objectId(); } } return objectId; } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return manager().kubernetesClusters().startAsync(this.resourceGroupName(), this.name()); } @Override public void stop() { this.stopAsync().block(); } @Override public Mono<Void> stopAsync() { return manager().kubernetesClusters().stopAsync(this.resourceGroupName(), this.name()); } private Mono<List<CredentialResult>> listAdminConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listAdminKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.adminKubeConfigs = kubeConfigs; return self.adminKubeConfigs; }); } private Mono<List<CredentialResult>> listUserConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listUserKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.userKubeConfigs = kubeConfigs; return self.userKubeConfigs; }); } @Override protected Mono<ManagedClusterInner> getInnerAsync() { final KubernetesClusterImpl self = this; final
Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .getByResourceGroupAsync(this.resourceGroupName(), this.name()) .flatMap( managedClusterInner -> Flux.merge(adminConfig, userConfig).last().map(bytes -> { /* refresh must drop the per-format kubeconfig cache so stale credentials are not served */ formatUserKubeConfigsMap.clear(); return managedClusterInner; })); } @Override public Mono<KubernetesCluster> createResourceAsync() { final KubernetesClusterImpl self = this; if (!this.isInCreateMode()) { this.innerModel().withServicePrincipalProfile(null); } final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .createOrUpdateAsync(self.resourceGroupName(), self.name(), self.innerModel()) .flatMap( inner -> Flux .merge(adminConfig, userConfig) .last() .map( bytes -> { self.setInner(inner); return self; })); } @Override public KubernetesClusterImpl withVersion(String kubernetesVersion) { this.innerModel().withKubernetesVersion(kubernetesVersion); return this; } @Override public KubernetesClusterImpl withDefaultVersion() { this.innerModel().withKubernetesVersion(""); return this; } @Override public KubernetesClusterImpl withRootUsername(String rootUserName) { if (this.innerModel().linuxProfile() == null) { this.innerModel().withLinuxProfile(new ContainerServiceLinuxProfile()); } this.innerModel().linuxProfile().withAdminUsername(rootUserName); return this; } @Override public KubernetesClusterImpl withSshKey(String sshKeyData) { this .innerModel() .linuxProfile() .withSsh( new ContainerServiceSshConfiguration().withPublicKeys(new ArrayList<ContainerServiceSshPublicKey>())); this.innerModel().linuxProfile().ssh().publicKeys().add( new ContainerServiceSshPublicKey().withKeyData(sshKeyData)); return this; } @Override public KubernetesClusterImpl withServicePrincipalClientId(String clientId) {
this.innerModel().withServicePrincipalProfile( new ManagedClusterServicePrincipalProfile().withClientId(clientId)); return this; } @Override public KubernetesClusterImpl withSystemAssignedManagedServiceIdentity() { this.innerModel().withIdentity(new ManagedClusterIdentity().withType(ResourceIdentityType.SYSTEM_ASSIGNED)); return this; } @Override public KubernetesClusterImpl withServicePrincipalSecret(String secret) { this.innerModel().servicePrincipalProfile().withSecret(secret); return this; } @Override public KubernetesClusterImpl withDnsPrefix(String dnsPrefix) { this.innerModel().withDnsPrefix(dnsPrefix); return this; } @Override public KubernetesClusterAgentPoolImpl defineAgentPool(String name) { ManagedClusterAgentPoolProfile innerPoolProfile = new ManagedClusterAgentPoolProfile(); innerPoolProfile.withName(name); return new KubernetesClusterAgentPoolImpl(innerPoolProfile, this); } @Override public KubernetesClusterAgentPoolImpl updateAgentPool(String name) { for (ManagedClusterAgentPoolProfile agentPoolProfile : innerModel().agentPoolProfiles()) { if (agentPoolProfile.name().equals(name)) { return new KubernetesClusterAgentPoolImpl(agentPoolProfile, this); } } throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Cannot get agent pool named %s", name))); } @Override public Update withoutAgentPool(String name) { if (innerModel().agentPoolProfiles() != null) { innerModel().withAgentPoolProfiles( innerModel().agentPoolProfiles().stream() .filter(p -> !name.equals(p.name())) .collect(Collectors.toList())); this.addDependency(context -> manager().serviceClient().getAgentPools().deleteAsync(resourceGroupName(), name(), name) .then(context.voidMono())); } return this; } @Override public KubernetesCluster.DefinitionStages.NetworkProfileDefinitionStages.Blank< KubernetesCluster.DefinitionStages.WithCreate> defineNetworkProfile() { return new KubernetesClusterNetworkProfileImpl(this); } @Override public KubernetesClusterImpl
withAddOnProfiles(Map<String, ManagedClusterAddonProfile> addOnProfileMap) { this.innerModel().withAddonProfiles(addOnProfileMap); return this; } @Override public KubernetesClusterImpl withNetworkProfile(ContainerServiceNetworkProfile networkProfile) { this.innerModel().withNetworkProfile(networkProfile); return this; } @Override public KubernetesClusterImpl withRBACEnabled() { this.innerModel().withEnableRbac(true); return this; } @Override public KubernetesClusterImpl withRBACDisabled() { this.innerModel().withEnableRbac(false); return this; } public KubernetesClusterImpl addNewAgentPool(KubernetesClusterAgentPoolImpl agentPool) { if (!isInCreateMode()) { this.addDependency(context -> manager().serviceClient().getAgentPools().createOrUpdateAsync( resourceGroupName(), name(), agentPool.name(), agentPool.getAgentPoolInner()) .then(context.voidMono())); } innerModel().agentPoolProfiles().add(agentPool.innerModel()); return this; } @Override public KubernetesClusterImpl withAutoScalerProfile(ManagedClusterPropertiesAutoScalerProfile autoScalerProfile) { this.innerModel().withAutoScalerProfile(autoScalerProfile); return this; } @Override public KubernetesClusterImpl enablePrivateCluster() { if (innerModel().apiServerAccessProfile() == null) { innerModel().withApiServerAccessProfile(new ManagedClusterApiServerAccessProfile()); } innerModel().apiServerAccessProfile().withEnablePrivateCluster(true); return this; } @Override public PagedIterable<PrivateLinkResource> listPrivateLinkResources() { return new PagedIterable<>(listPrivateLinkResourcesAsync()); } @Override public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() { Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateLinkResourceImpl::new) .collect(Collectors.toList()))); return
PagedConverter.convertListToPagedFlux(retList); } @Override public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() { return new PagedIterable<>(listPrivateEndpointConnectionsAsync()); } @Override public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() { Mono<Response<List<PrivateEndpointConnection>>> retList = this.manager().serviceClient() .getPrivateEndpointConnections() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateEndpointConnectionImpl::new) .collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } private static final class PrivateLinkResourceImpl implements PrivateLinkResource { private final PrivateLinkResourceInner innerModel; private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) { this.innerModel = innerModel; } @Override public String groupId() { return innerModel.groupId(); } @Override public List<String> requiredMemberNames() { return Collections.unmodifiableList(innerModel.requiredMembers()); } @Override public List<String> requiredDnsZoneNames() { return Collections.emptyList(); } } private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection { private final PrivateEndpointConnectionInner innerModel; private final PrivateEndpoint privateEndpoint; private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState; private final PrivateEndpointConnectionProvisioningState provisioningState; private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) { this.innerModel = innerModel; this.privateEndpoint = innerModel.privateEndpoint() == null ? null : new PrivateEndpoint(innerModel.privateEndpoint().id()); this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null ?
null : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState( innerModel.privateLinkServiceConnectionState().status() == null ? null : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus .fromString(innerModel.privateLinkServiceConnectionState().status().toString()), innerModel.privateLinkServiceConnectionState().description(), ""); this.provisioningState = innerModel.provisioningState() == null ? null : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString()); } @Override public String id() { return innerModel.id(); } @Override public String name() { return innerModel.name(); } @Override public String type() { return innerModel.type(); } @Override public PrivateEndpoint privateEndpoint() { return privateEndpoint; } @Override public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState() { return privateLinkServiceConnectionState; } @Override public PrivateEndpointConnectionProvisioningState provisioningState() { return provisioningState; } } }
/** The implementation for {@link KubernetesCluster} and its create and update flows. */
class KubernetesClusterImpl
    extends GroupableResourceImpl<
        KubernetesCluster, ManagedClusterInner, KubernetesClusterImpl, ContainerServiceManager>
    implements KubernetesCluster, KubernetesCluster.Definition, KubernetesCluster.Update {

    private final ClientLogger logger = new ClientLogger(getClass());

    // Credential caches; refreshed whenever the inner model is reloaded or (re)created.
    private List<CredentialResult> adminKubeConfigs;
    private List<CredentialResult> userKubeConfigs;
    // Lazily-populated user kubeconfig cache keyed by requested Format; concurrent because
    // callers may query different formats from different threads.
    private final Map<Format, List<CredentialResult>> formatUserKubeConfigsMap = new ConcurrentHashMap<>();

    protected KubernetesClusterImpl(String name, ManagedClusterInner innerObject, ContainerServiceManager manager) {
        super(name, innerObject, manager);
        if (this.innerModel().agentPoolProfiles() == null) {
            this.innerModel().withAgentPoolProfiles(new ArrayList<>());
        }
        this.adminKubeConfigs = null;
        this.userKubeConfigs = null;
    }

    @Override
    public String provisioningState() {
        return this.innerModel().provisioningState();
    }

    @Override
    public String dnsPrefix() {
        return this.innerModel().dnsPrefix();
    }

    @Override
    public String fqdn() {
        return this.innerModel().fqdn();
    }

    @Override
    public String version() {
        return this.innerModel().kubernetesVersion();
    }

    @Override
    public List<CredentialResult> adminKubeConfigs() {
        // Lazily fetch and cache; an empty cache is treated as "not fetched yet".
        if (this.adminKubeConfigs == null || this.adminKubeConfigs.size() == 0) {
            this.adminKubeConfigs =
                this.manager().kubernetesClusters().listAdminKubeConfigContent(this.resourceGroupName(), this.name());
        }
        return Collections.unmodifiableList(this.adminKubeConfigs);
    }

    @Override
    public List<CredentialResult> userKubeConfigs() {
        if (this.userKubeConfigs == null || this.userKubeConfigs.size() == 0) {
            this.userKubeConfigs =
                this.manager().kubernetesClusters().listUserKubeConfigContent(this.resourceGroupName(), this.name());
        }
        return Collections.unmodifiableList(this.userKubeConfigs);
    }

    @Override
    public List<CredentialResult> userKubeConfigs(Format format) {
        if (format == null) {
            // No format requested: fall back to the default-format overload.
            return userKubeConfigs();
        }
        return Collections.unmodifiableList(
            this.formatUserKubeConfigsMap.computeIfAbsent(
                format,
                key -> KubernetesClusterImpl.this
                    .manager()
                    .kubernetesClusters()
                    .listUserKubeConfigContent(
                        KubernetesClusterImpl.this.resourceGroupName(),
                        KubernetesClusterImpl.this.name(),
                        format)));
    }

    @Override
    public byte[] adminKubeConfigContent() {
        // Returns the first credential payload, or an empty array when none exist.
        for (CredentialResult config : adminKubeConfigs()) {
            return config.value();
        }
        return new byte[0];
    }

    @Override
    public byte[] userKubeConfigContent() {
        for (CredentialResult config : userKubeConfigs()) {
            return config.value();
        }
        return new byte[0];
    }

    @Override
    public byte[] userKubeConfigContent(Format format) {
        // FIX: the original contained a duplicated "@Override @Override" with this method's
        // body missing (a compile error); restored from the format-aware overload contract.
        if (format == null) {
            return userKubeConfigContent();
        }
        for (CredentialResult config : userKubeConfigs(format)) {
            return config.value();
        }
        return new byte[0];
    }

    @Override
    public String servicePrincipalClientId() {
        if (this.innerModel().servicePrincipalProfile() != null) {
            return this.innerModel().servicePrincipalProfile().clientId();
        } else {
            return null;
        }
    }

    @Override
    public String servicePrincipalSecret() {
        if (this.innerModel().servicePrincipalProfile() != null) {
            return this.innerModel().servicePrincipalProfile().secret();
        } else {
            return null;
        }
    }

    @Override
    public String linuxRootUsername() {
        if (this.innerModel().linuxProfile() != null) {
            return this.innerModel().linuxProfile().adminUsername();
        } else {
            return null;
        }
    }

    @Override
    public String sshKey() {
        if (this.innerModel().linuxProfile() == null
            || this.innerModel().linuxProfile().ssh() == null
            || this.innerModel().linuxProfile().ssh().publicKeys() == null
            || this.innerModel().linuxProfile().ssh().publicKeys().size() == 0) {
            return null;
        } else {
            return this.innerModel().linuxProfile().ssh().publicKeys().get(0).keyData();
        }
    }

    @Override
    public Map<String, KubernetesClusterAgentPool> agentPools() {
        Map<String, KubernetesClusterAgentPool> agentPoolMap = new HashMap<>();
        if (this.innerModel().agentPoolProfiles() != null && this.innerModel().agentPoolProfiles().size() > 0) {
            for (ManagedClusterAgentPoolProfile agentPoolProfile : this.innerModel().agentPoolProfiles()) {
                agentPoolMap.put(agentPoolProfile.name(), new KubernetesClusterAgentPoolImpl(agentPoolProfile, this));
            }
        }
        return Collections.unmodifiableMap(agentPoolMap);
    }

    @Override
    public ContainerServiceNetworkProfile networkProfile() {
        return this.innerModel().networkProfile();
    }

    @Override
    public Map<String, ManagedClusterAddonProfile> addonProfiles() {
        // FIX: guard against NPE when the service returns no addon profiles.
        return this.innerModel().addonProfiles() == null
            ? Collections.emptyMap()
            : Collections.unmodifiableMap(this.innerModel().addonProfiles());
    }

    @Override
    public String nodeResourceGroup() {
        return this.innerModel().nodeResourceGroup();
    }

    @Override
    public boolean enableRBAC() {
        // FIX: avoid auto-unboxing NPE when the service omits the flag; absent means disabled.
        return Boolean.TRUE.equals(this.innerModel().enableRbac());
    }

    @Override
    public PowerState powerState() {
        return this.innerModel().powerState();
    }

    @Override
    public String systemAssignedManagedServiceIdentityPrincipalId() {
        String objectId = null;
        if (this.innerModel().identityProfile() != null) {
            UserAssignedIdentity identity = this.innerModel().identityProfile().get("kubeletidentity");
            if (identity != null) {
                objectId = identity.objectId();
            }
        }
        return objectId;
    }

    @Override
    public void start() {
        this.startAsync().block();
    }

    @Override
    public Mono<Void> startAsync() {
        return manager().kubernetesClusters().startAsync(this.resourceGroupName(), this.name());
    }

    @Override
    public void stop() {
        this.stopAsync().block();
    }

    @Override
    public Mono<Void> stopAsync() {
        return manager().kubernetesClusters().stopAsync(this.resourceGroupName(), this.name());
    }

    // Fetches admin kubeconfigs, refreshing the instance cache as a side effect when subscribed.
    private Mono<List<CredentialResult>> listAdminConfig(final KubernetesClusterImpl self) {
        return this
            .manager()
            .kubernetesClusters()
            .listAdminKubeConfigContentAsync(self.resourceGroupName(), self.name())
            .map(kubeConfigs -> {
                self.adminKubeConfigs = kubeConfigs;
                return self.adminKubeConfigs;
            });
    }

    // Fetches user kubeconfigs, refreshing the instance cache as a side effect when subscribed.
    private Mono<List<CredentialResult>> listUserConfig(final KubernetesClusterImpl self) {
        return this
            .manager()
            .kubernetesClusters()
            .listUserKubeConfigContentAsync(self.resourceGroupName(), self.name())
            .map(kubeConfigs -> {
                self.userKubeConfigs = kubeConfigs;
                return self.userKubeConfigs;
            });
    }

    @Override
    protected Mono<ManagedClusterInner> getInnerAsync() {
        final KubernetesClusterImpl self = this;
        final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self);
        final Mono<List<CredentialResult>> userConfig = listUserConfig(self);
        return this
            .manager()
            .serviceClient()
            .getManagedClusters()
            .getByResourceGroupAsync(this.resourceGroupName(), this.name())
            .flatMap(managedClusterInner ->
                Flux.merge(adminConfig, userConfig)
                    .last()
                    .map(bytes -> {
                        // Invalidate the per-format cache so a refresh never serves
                        // stale credentials.
                        formatUserKubeConfigsMap.clear();
                        return managedClusterInner;
                    }));
    }

    @Override
    public Mono<KubernetesCluster> createResourceAsync() {
        final KubernetesClusterImpl self = this;
        if (!this.isInCreateMode()) {
            // The service principal profile must not be sent on update.
            this.innerModel().withServicePrincipalProfile(null);
        }
        final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self);
        final Mono<List<CredentialResult>> userConfig = listUserConfig(self);
        return this
            .manager()
            .serviceClient()
            .getManagedClusters()
            .createOrUpdateAsync(self.resourceGroupName(), self.name(), self.innerModel())
            .flatMap(inner ->
                Flux.merge(adminConfig, userConfig)
                    .last()
                    .map(bytes -> {
                        self.setInner(inner);
                        return self;
                    }));
    }

    @Override
    public KubernetesClusterImpl withVersion(String kubernetesVersion) {
        this.innerModel().withKubernetesVersion(kubernetesVersion);
        return this;
    }

    @Override
    public KubernetesClusterImpl withDefaultVersion() {
        // An empty string lets the service choose its default Kubernetes version.
        this.innerModel().withKubernetesVersion("");
        return this;
    }

    @Override
    public KubernetesClusterImpl withRootUsername(String rootUserName) {
        if (this.innerModel().linuxProfile() == null) {
            this.innerModel().withLinuxProfile(new ContainerServiceLinuxProfile());
        }
        this.innerModel().linuxProfile().withAdminUsername(rootUserName);
        return this;
    }

    @Override
    public KubernetesClusterImpl withSshKey(String sshKeyData) {
        // Replaces any previously configured SSH keys with the single provided key.
        this.innerModel()
            .linuxProfile()
            .withSsh(new ContainerServiceSshConfiguration()
                .withPublicKeys(new ArrayList<ContainerServiceSshPublicKey>()));
        this.innerModel().linuxProfile().ssh().publicKeys()
            .add(new ContainerServiceSshPublicKey().withKeyData(sshKeyData));
        return this;
    }

    @Override
    public KubernetesClusterImpl withServicePrincipalClientId(String clientId) {
        this.innerModel()
            .withServicePrincipalProfile(new ManagedClusterServicePrincipalProfile().withClientId(clientId));
        return this;
    }

    @Override
    public KubernetesClusterImpl withSystemAssignedManagedServiceIdentity() {
        this.innerModel().withIdentity(new ManagedClusterIdentity().withType(ResourceIdentityType.SYSTEM_ASSIGNED));
        return this;
    }

    @Override
    public KubernetesClusterImpl withServicePrincipalSecret(String secret) {
        this.innerModel().servicePrincipalProfile().withSecret(secret);
        return this;
    }

    @Override
    public KubernetesClusterImpl withDnsPrefix(String dnsPrefix) {
        this.innerModel().withDnsPrefix(dnsPrefix);
        return this;
    }

    @Override
    public KubernetesClusterAgentPoolImpl defineAgentPool(String name) {
        ManagedClusterAgentPoolProfile innerPoolProfile = new ManagedClusterAgentPoolProfile();
        innerPoolProfile.withName(name);
        return new KubernetesClusterAgentPoolImpl(innerPoolProfile, this);
    }

    @Override
    public KubernetesClusterAgentPoolImpl updateAgentPool(String name) {
        for (ManagedClusterAgentPoolProfile agentPoolProfile : innerModel().agentPoolProfiles()) {
            if (agentPoolProfile.name().equals(name)) {
                return new KubernetesClusterAgentPoolImpl(agentPoolProfile, this);
            }
        }
        throw logger.logExceptionAsError(new IllegalArgumentException(
            String.format("Cannot get agent pool named %s", name)));
    }

    @Override
    public Update withoutAgentPool(String name) {
        if (innerModel().agentPoolProfiles() != null) {
            innerModel().withAgentPoolProfiles(
                innerModel().agentPoolProfiles().stream()
                    .filter(p -> !name.equals(p.name()))
                    .collect(Collectors.toList()));
            // Deleting the pool on the service side happens as a post-update dependency.
            this.addDependency(context ->
                manager().serviceClient().getAgentPools().deleteAsync(resourceGroupName(), name(), name)
                    .then(context.voidMono()));
        }
        return this;
    }

    @Override
    public KubernetesCluster.DefinitionStages.NetworkProfileDefinitionStages.Blank<
            KubernetesCluster.DefinitionStages.WithCreate>
        defineNetworkProfile() {
        return new KubernetesClusterNetworkProfileImpl(this);
    }

    @Override
    public KubernetesClusterImpl withAddOnProfiles(Map<String, ManagedClusterAddonProfile> addOnProfileMap) {
        this.innerModel().withAddonProfiles(addOnProfileMap);
        return this;
    }

    @Override
    public KubernetesClusterImpl withNetworkProfile(ContainerServiceNetworkProfile networkProfile) {
        this.innerModel().withNetworkProfile(networkProfile);
        return this;
    }

    @Override
    public KubernetesClusterImpl withRBACEnabled() {
        this.innerModel().withEnableRbac(true);
        return this;
    }

    @Override
    public KubernetesClusterImpl withRBACDisabled() {
        this.innerModel().withEnableRbac(false);
        return this;
    }

    public KubernetesClusterImpl addNewAgentPool(KubernetesClusterAgentPoolImpl agentPool) {
        if (!isInCreateMode()) {
            // In update mode the agent pool is created through its own child-resource API.
            this.addDependency(context ->
                manager().serviceClient().getAgentPools()
                    .createOrUpdateAsync(resourceGroupName(), name(), agentPool.name(), agentPool.getAgentPoolInner())
                    .then(context.voidMono()));
        }
        innerModel().agentPoolProfiles().add(agentPool.innerModel());
        return this;
    }

    @Override
    public KubernetesClusterImpl withAutoScalerProfile(ManagedClusterPropertiesAutoScalerProfile autoScalerProfile) {
        this.innerModel().withAutoScalerProfile(autoScalerProfile);
        return this;
    }

    @Override
    public KubernetesClusterImpl enablePrivateCluster() {
        if (innerModel().apiServerAccessProfile() == null) {
            innerModel().withApiServerAccessProfile(new ManagedClusterApiServerAccessProfile());
        }
        innerModel().apiServerAccessProfile().withEnablePrivateCluster(true);
        return this;
    }

    @Override
    public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
        return new PagedIterable<>(listPrivateLinkResourcesAsync());
    }

    @Override
    public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
        Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
            .listWithResponseAsync(this.resourceGroupName(), this.name())
            .map(response -> new SimpleResponse<>(response,
                response.getValue().value().stream()
                    .map(PrivateLinkResourceImpl::new)
                    .collect(Collectors.toList())));
        return PagedConverter.convertListToPagedFlux(retList);
    }

    @Override
    public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
        return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
    }

    @Override
    public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
        Mono<Response<List<PrivateEndpointConnection>>> retList = this.manager().serviceClient()
            .getPrivateEndpointConnections()
            .listWithResponseAsync(this.resourceGroupName(), this.name())
            .map(response -> new SimpleResponse<>(response,
                response.getValue().value().stream()
                    .map(PrivateEndpointConnectionImpl::new)
                    .collect(Collectors.toList())));
        return PagedConverter.convertListToPagedFlux(retList);
    }

    /** Read-only adapter from the inner private-link resource model. */
    private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
        private final PrivateLinkResourceInner innerModel;

        private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) {
            this.innerModel = innerModel;
        }

        @Override
        public String groupId() {
            return innerModel.groupId();
        }

        @Override
        public List<String> requiredMemberNames() {
            return Collections.unmodifiableList(innerModel.requiredMembers());
        }

        @Override
        public List<String> requiredDnsZoneNames() {
            // The inner model does not expose DNS zone names for this resource type.
            return Collections.emptyList();
        }
    }

    /** Read-only adapter from the inner private-endpoint-connection model. */
    private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
        private final PrivateEndpointConnectionInner innerModel;
        private final PrivateEndpoint privateEndpoint;
        private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
            privateLinkServiceConnectionState;
        private final PrivateEndpointConnectionProvisioningState provisioningState;

        private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
            this.innerModel = innerModel;
            this.privateEndpoint = innerModel.privateEndpoint() == null
                ? null
                : new PrivateEndpoint(innerModel.privateEndpoint().id());
            // Translate the service-specific connection state into the fluent-core model;
            // the third argument (actionsRequired) is not surfaced by this service, hence "".
            this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
                ? null
                : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
                    innerModel.privateLinkServiceConnectionState().status() == null
                        ? null
                        : com.azure.resourcemanager.resources.fluentcore.arm.models
                            .PrivateEndpointServiceConnectionStatus
                            .fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
                    innerModel.privateLinkServiceConnectionState().description(),
                    "");
            this.provisioningState = innerModel.provisioningState() == null
                ? null
                : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
        }

        @Override
        public String id() {
            return innerModel.id();
        }

        @Override
        public String name() {
            return innerModel.name();
        }

        @Override
        public String type() {
            return innerModel.type();
        }

        @Override
        public PrivateEndpoint privateEndpoint() {
            return privateEndpoint;
        }

        @Override
        public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
            privateLinkServiceConnectionState() {
            return privateLinkServiceConnectionState;
        }

        @Override
        public PrivateEndpointConnectionProvisioningState provisioningState() {
            return provisioningState;
        }
    }
}
For the same AAD cluster, the `kubeconfig` returned with format `AZURE` contains:

```yaml
users:
- name: clusterUser_rg-xiaofei_testxiaofeikubecluster
  user:
    auth-provider:
      config:
        apiserver-id: xxx
        client-id: xxx
        config-mode: "1"
        environment: AzurePublicCloud
        tenant-id: xxx
      name: azure
```

while the one returned with format `EXEC` contains:

```yaml
users:
- name: clusterUser_rg-xiaofei_testxiaofeikubecluster
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1beta1
      args:
      - get-token
      - --environment
      - AzurePublicCloud
      - --server-id
      - xxx
      - --client-id
      - xxx
      - --tenant-id
      - xxx
      - --login
      - devicecode
      command: kubelogin
      env: null
```
public byte[] userKubeConfigContent(Format format) {
    // Fall back to the default-format overload when no format is requested.
    if (format == null) {
        return userKubeConfigContent();
    }
    // Return the first credential payload, or an empty array when none exist.
    List<CredentialResult> credentials = userKubeConfigs(format);
    return credentials.isEmpty() ? new byte[0] : credentials.get(0).value();
}
}
public byte[] userKubeConfigContent(Format format) {
    // Delegate to the formatless variant when the caller did not specify a format.
    if (format == null) {
        return userKubeConfigContent();
    }
    // Take the first credential's payload if any were returned; otherwise empty.
    byte[] content = new byte[0];
    for (CredentialResult credential : userKubeConfigs(format)) {
        content = credential.value();
        break;
    }
    return content;
}
/** The implementation for {@link KubernetesCluster} and its create and update flows. */
class KubernetesClusterImpl
    extends GroupableResourceImpl<
        KubernetesCluster, ManagedClusterInner, KubernetesClusterImpl, ContainerServiceManager>
    implements KubernetesCluster, KubernetesCluster.Definition, KubernetesCluster.Update {

    private final ClientLogger logger = new ClientLogger(getClass());

    // Credential caches; refreshed whenever the inner model is reloaded or (re)created.
    private List<CredentialResult> adminKubeConfigs;
    private List<CredentialResult> userKubeConfigs;
    // Lazily-populated user kubeconfig cache keyed by requested Format; concurrent because
    // callers may query different formats from different threads.
    private final Map<Format, List<CredentialResult>> formatUserKubeConfigsMap = new ConcurrentHashMap<>();

    protected KubernetesClusterImpl(String name, ManagedClusterInner innerObject, ContainerServiceManager manager) {
        super(name, innerObject, manager);
        if (this.innerModel().agentPoolProfiles() == null) {
            this.innerModel().withAgentPoolProfiles(new ArrayList<>());
        }
        this.adminKubeConfigs = null;
        this.userKubeConfigs = null;
    }

    @Override
    public String provisioningState() {
        return this.innerModel().provisioningState();
    }

    @Override
    public String dnsPrefix() {
        return this.innerModel().dnsPrefix();
    }

    @Override
    public String fqdn() {
        return this.innerModel().fqdn();
    }

    @Override
    public String version() {
        return this.innerModel().kubernetesVersion();
    }

    @Override
    public List<CredentialResult> adminKubeConfigs() {
        // Lazily fetch and cache; an empty cache is treated as "not fetched yet".
        if (this.adminKubeConfigs == null || this.adminKubeConfigs.size() == 0) {
            this.adminKubeConfigs =
                this.manager().kubernetesClusters().listAdminKubeConfigContent(this.resourceGroupName(), this.name());
        }
        return Collections.unmodifiableList(this.adminKubeConfigs);
    }

    @Override
    public List<CredentialResult> userKubeConfigs() {
        if (this.userKubeConfigs == null || this.userKubeConfigs.size() == 0) {
            this.userKubeConfigs =
                this.manager().kubernetesClusters().listUserKubeConfigContent(this.resourceGroupName(), this.name());
        }
        return Collections.unmodifiableList(this.userKubeConfigs);
    }

    @Override
    public List<CredentialResult> userKubeConfigs(Format format) {
        if (format == null) {
            // No format requested: fall back to the default-format overload.
            return userKubeConfigs();
        }
        return Collections.unmodifiableList(
            this.formatUserKubeConfigsMap.computeIfAbsent(
                format,
                key -> KubernetesClusterImpl.this
                    .manager()
                    .kubernetesClusters()
                    .listUserKubeConfigContent(
                        KubernetesClusterImpl.this.resourceGroupName(),
                        KubernetesClusterImpl.this.name(),
                        format)));
    }

    @Override
    public byte[] adminKubeConfigContent() {
        // Returns the first credential payload, or an empty array when none exist.
        for (CredentialResult config : adminKubeConfigs()) {
            return config.value();
        }
        return new byte[0];
    }

    @Override
    public byte[] userKubeConfigContent() {
        for (CredentialResult config : userKubeConfigs()) {
            return config.value();
        }
        return new byte[0];
    }

    @Override
    public byte[] userKubeConfigContent(Format format) {
        // FIX: the original contained a duplicated "@Override @Override" with this method's
        // body missing (a compile error); restored from the format-aware overload contract.
        if (format == null) {
            return userKubeConfigContent();
        }
        for (CredentialResult config : userKubeConfigs(format)) {
            return config.value();
        }
        return new byte[0];
    }

    @Override
    public String servicePrincipalClientId() {
        if (this.innerModel().servicePrincipalProfile() != null) {
            return this.innerModel().servicePrincipalProfile().clientId();
        } else {
            return null;
        }
    }

    @Override
    public String servicePrincipalSecret() {
        if (this.innerModel().servicePrincipalProfile() != null) {
            return this.innerModel().servicePrincipalProfile().secret();
        } else {
            return null;
        }
    }

    @Override
    public String linuxRootUsername() {
        if (this.innerModel().linuxProfile() != null) {
            return this.innerModel().linuxProfile().adminUsername();
        } else {
            return null;
        }
    }

    @Override
    public String sshKey() {
        if (this.innerModel().linuxProfile() == null
            || this.innerModel().linuxProfile().ssh() == null
            || this.innerModel().linuxProfile().ssh().publicKeys() == null
            || this.innerModel().linuxProfile().ssh().publicKeys().size() == 0) {
            return null;
        } else {
            return this.innerModel().linuxProfile().ssh().publicKeys().get(0).keyData();
        }
    }

    @Override
    public Map<String, KubernetesClusterAgentPool> agentPools() {
        Map<String, KubernetesClusterAgentPool> agentPoolMap = new HashMap<>();
        if (this.innerModel().agentPoolProfiles() != null && this.innerModel().agentPoolProfiles().size() > 0) {
            for (ManagedClusterAgentPoolProfile agentPoolProfile : this.innerModel().agentPoolProfiles()) {
                agentPoolMap.put(agentPoolProfile.name(), new KubernetesClusterAgentPoolImpl(agentPoolProfile, this));
            }
        }
        return Collections.unmodifiableMap(agentPoolMap);
    }

    @Override
    public ContainerServiceNetworkProfile networkProfile() {
        return this.innerModel().networkProfile();
    }

    @Override
    public Map<String, ManagedClusterAddonProfile> addonProfiles() {
        // FIX: guard against NPE when the service returns no addon profiles.
        return this.innerModel().addonProfiles() == null
            ? Collections.emptyMap()
            : Collections.unmodifiableMap(this.innerModel().addonProfiles());
    }

    @Override
    public String nodeResourceGroup() {
        return this.innerModel().nodeResourceGroup();
    }

    @Override
    public boolean enableRBAC() {
        // FIX: avoid auto-unboxing NPE when the service omits the flag; absent means disabled.
        return Boolean.TRUE.equals(this.innerModel().enableRbac());
    }

    @Override
    public PowerState powerState() {
        return this.innerModel().powerState();
    }

    @Override
    public String systemAssignedManagedServiceIdentityPrincipalId() {
        String objectId = null;
        if (this.innerModel().identityProfile() != null) {
            UserAssignedIdentity identity = this.innerModel().identityProfile().get("kubeletidentity");
            if (identity != null) {
                objectId = identity.objectId();
            }
        }
        return objectId;
    }

    @Override
    public void start() {
        this.startAsync().block();
    }

    @Override
    public Mono<Void> startAsync() {
        return manager().kubernetesClusters().startAsync(this.resourceGroupName(), this.name());
    }

    @Override
    public void stop() {
        this.stopAsync().block();
    }

    @Override
    public Mono<Void> stopAsync() {
        return manager().kubernetesClusters().stopAsync(this.resourceGroupName(), this.name());
    }

    // Fetches admin kubeconfigs, refreshing the instance cache as a side effect when subscribed.
    private Mono<List<CredentialResult>> listAdminConfig(final KubernetesClusterImpl self) {
        return this
            .manager()
            .kubernetesClusters()
            .listAdminKubeConfigContentAsync(self.resourceGroupName(), self.name())
            .map(kubeConfigs -> {
                self.adminKubeConfigs = kubeConfigs;
                return self.adminKubeConfigs;
            });
    }

    // Fetches user kubeconfigs, refreshing the instance cache as a side effect when subscribed.
    private Mono<List<CredentialResult>> listUserConfig(final KubernetesClusterImpl self) {
        return this
            .manager()
            .kubernetesClusters()
            .listUserKubeConfigContentAsync(self.resourceGroupName(), self.name())
            .map(kubeConfigs -> {
                self.userKubeConfigs = kubeConfigs;
                return self.userKubeConfigs;
            });
    }

    @Override
    protected Mono<ManagedClusterInner> getInnerAsync() {
        final KubernetesClusterImpl self = this;
        final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self);
        final Mono<List<CredentialResult>> userConfig = listUserConfig(self);
        return this
            .manager()
            .serviceClient()
            .getManagedClusters()
            .getByResourceGroupAsync(this.resourceGroupName(), this.name())
            .flatMap(managedClusterInner ->
                Flux.merge(adminConfig, userConfig)
                    .last()
                    .map(bytes -> {
                        // FIX: also invalidate the per-format cache on refresh; the original
                        // left stale format-specific credentials in formatUserKubeConfigsMap.
                        formatUserKubeConfigsMap.clear();
                        return managedClusterInner;
                    }));
    }

    @Override
    public Mono<KubernetesCluster> createResourceAsync() {
        final KubernetesClusterImpl self = this;
        if (!this.isInCreateMode()) {
            // The service principal profile must not be sent on update.
            this.innerModel().withServicePrincipalProfile(null);
        }
        final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self);
        final Mono<List<CredentialResult>> userConfig = listUserConfig(self);
        return this
            .manager()
            .serviceClient()
            .getManagedClusters()
            .createOrUpdateAsync(self.resourceGroupName(), self.name(), self.innerModel())
            .flatMap(inner ->
                Flux.merge(adminConfig, userConfig)
                    .last()
                    .map(bytes -> {
                        self.setInner(inner);
                        return self;
                    }));
    }

    @Override
    public KubernetesClusterImpl withVersion(String kubernetesVersion) {
        this.innerModel().withKubernetesVersion(kubernetesVersion);
        return this;
    }

    @Override
    public KubernetesClusterImpl withDefaultVersion() {
        // An empty string lets the service choose its default Kubernetes version.
        this.innerModel().withKubernetesVersion("");
        return this;
    }

    @Override
    public KubernetesClusterImpl withRootUsername(String rootUserName) {
        if (this.innerModel().linuxProfile() == null) {
            this.innerModel().withLinuxProfile(new ContainerServiceLinuxProfile());
        }
        this.innerModel().linuxProfile().withAdminUsername(rootUserName);
        return this;
    }

    @Override
    public KubernetesClusterImpl withSshKey(String sshKeyData) {
        // Replaces any previously configured SSH keys with the single provided key.
        this.innerModel()
            .linuxProfile()
            .withSsh(new ContainerServiceSshConfiguration()
                .withPublicKeys(new ArrayList<ContainerServiceSshPublicKey>()));
        this.innerModel().linuxProfile().ssh().publicKeys()
            .add(new ContainerServiceSshPublicKey().withKeyData(sshKeyData));
        return this;
    }

    @Override
    public KubernetesClusterImpl withServicePrincipalClientId(String clientId) {
        this.innerModel()
            .withServicePrincipalProfile(new ManagedClusterServicePrincipalProfile().withClientId(clientId));
        return this;
    }

    @Override
    public KubernetesClusterImpl withSystemAssignedManagedServiceIdentity() {
        this.innerModel().withIdentity(new ManagedClusterIdentity().withType(ResourceIdentityType.SYSTEM_ASSIGNED));
        return this;
    }

    @Override
    public KubernetesClusterImpl withServicePrincipalSecret(String secret) {
        this.innerModel().servicePrincipalProfile().withSecret(secret);
        return this;
    }

    @Override
    public KubernetesClusterImpl withDnsPrefix(String dnsPrefix) {
        this.innerModel().withDnsPrefix(dnsPrefix);
        return this;
    }

    @Override
    public KubernetesClusterAgentPoolImpl defineAgentPool(String name) {
        ManagedClusterAgentPoolProfile innerPoolProfile = new ManagedClusterAgentPoolProfile();
        innerPoolProfile.withName(name);
        return new KubernetesClusterAgentPoolImpl(innerPoolProfile, this);
    }

    @Override
    public KubernetesClusterAgentPoolImpl updateAgentPool(String name) {
        for (ManagedClusterAgentPoolProfile agentPoolProfile : innerModel().agentPoolProfiles()) {
            if (agentPoolProfile.name().equals(name)) {
                return new KubernetesClusterAgentPoolImpl(agentPoolProfile, this);
            }
        }
        throw logger.logExceptionAsError(new IllegalArgumentException(
            String.format("Cannot get agent pool named %s", name)));
    }

    @Override
    public Update withoutAgentPool(String name) {
        if (innerModel().agentPoolProfiles() != null) {
            innerModel().withAgentPoolProfiles(
                innerModel().agentPoolProfiles().stream()
                    .filter(p -> !name.equals(p.name()))
                    .collect(Collectors.toList()));
            // Deleting the pool on the service side happens as a post-update dependency.
            this.addDependency(context ->
                manager().serviceClient().getAgentPools().deleteAsync(resourceGroupName(), name(), name)
                    .then(context.voidMono()));
        }
        return this;
    }

    @Override
    public KubernetesCluster.DefinitionStages.NetworkProfileDefinitionStages.Blank<
            KubernetesCluster.DefinitionStages.WithCreate>
        defineNetworkProfile() {
        return new KubernetesClusterNetworkProfileImpl(this);
    }

    @Override
    public KubernetesClusterImpl withAddOnProfiles(Map<String, ManagedClusterAddonProfile> addOnProfileMap) {
        this.innerModel().withAddonProfiles(addOnProfileMap);
        return this;
    }

    @Override
    public KubernetesClusterImpl withNetworkProfile(ContainerServiceNetworkProfile networkProfile) {
        this.innerModel().withNetworkProfile(networkProfile);
        return this;
    }

    @Override
    public KubernetesClusterImpl withRBACEnabled() {
        this.innerModel().withEnableRbac(true);
        return this;
    }

    @Override
    public KubernetesClusterImpl withRBACDisabled() {
        this.innerModel().withEnableRbac(false);
        return this;
    }

    public KubernetesClusterImpl addNewAgentPool(KubernetesClusterAgentPoolImpl agentPool) {
        if (!isInCreateMode()) {
            // In update mode the agent pool is created through its own child-resource API.
            this.addDependency(context ->
                manager().serviceClient().getAgentPools()
                    .createOrUpdateAsync(resourceGroupName(), name(), agentPool.name(), agentPool.getAgentPoolInner())
                    .then(context.voidMono()));
        }
        innerModel().agentPoolProfiles().add(agentPool.innerModel());
        return this;
    }

    @Override
    public KubernetesClusterImpl withAutoScalerProfile(ManagedClusterPropertiesAutoScalerProfile autoScalerProfile) {
        this.innerModel().withAutoScalerProfile(autoScalerProfile);
        return this;
    }

    @Override
    public KubernetesClusterImpl enablePrivateCluster() {
        if (innerModel().apiServerAccessProfile() == null) {
            innerModel().withApiServerAccessProfile(new ManagedClusterApiServerAccessProfile());
        }
        innerModel().apiServerAccessProfile().withEnablePrivateCluster(true);
        return this;
    }

    @Override
    public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
        return new PagedIterable<>(listPrivateLinkResourcesAsync());
    }

    @Override
    public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
        Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
            .listWithResponseAsync(this.resourceGroupName(), this.name())
            .map(response -> new SimpleResponse<>(response,
                response.getValue().value().stream()
                    .map(PrivateLinkResourceImpl::new)
                    .collect(Collectors.toList())));
        return PagedConverter.convertListToPagedFlux(retList);
    }

    @Override
    public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
        return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
    }

    @Override
    public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
        Mono<Response<List<PrivateEndpointConnection>>> retList = this.manager().serviceClient()
            .getPrivateEndpointConnections()
            .listWithResponseAsync(this.resourceGroupName(), this.name())
            .map(response -> new SimpleResponse<>(response,
                response.getValue().value().stream()
                    .map(PrivateEndpointConnectionImpl::new)
                    .collect(Collectors.toList())));
        return PagedConverter.convertListToPagedFlux(retList);
    }

    /** Read-only adapter from the inner private-link resource model. */
    private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
        private final PrivateLinkResourceInner innerModel;

        private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) {
            this.innerModel = innerModel;
        }

        @Override
        public String groupId() {
            return innerModel.groupId();
        }

        @Override
        public List<String> requiredMemberNames() {
            return Collections.unmodifiableList(innerModel.requiredMembers());
        }

        @Override
        public List<String> requiredDnsZoneNames() {
            // The inner model does not expose DNS zone names for this resource type.
            return Collections.emptyList();
        }
    }

    /** Read-only adapter from the inner private-endpoint-connection model. */
    private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
        private final PrivateEndpointConnectionInner innerModel;
        private final PrivateEndpoint privateEndpoint;
        private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
            privateLinkServiceConnectionState;
        private final PrivateEndpointConnectionProvisioningState provisioningState;

        private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
            this.innerModel = innerModel;
            this.privateEndpoint = innerModel.privateEndpoint() == null
                ? null
                : new PrivateEndpoint(innerModel.privateEndpoint().id());
            // Translate the service-specific connection state into the fluent-core model;
            // the third argument (actionsRequired) is not surfaced by this service, hence "".
            this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
                ? null
                : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
                    innerModel.privateLinkServiceConnectionState().status() == null
                        ? null
                        : com.azure.resourcemanager.resources.fluentcore.arm.models
                            .PrivateEndpointServiceConnectionStatus
                            .fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
                    innerModel.privateLinkServiceConnectionState().description(),
                    "");
            this.provisioningState = innerModel.provisioningState() == null
                ? null
                : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
        }

        @Override
        public String id() {
            return innerModel.id();
        }

        @Override
        public String name() {
            return innerModel.name();
        }

        @Override
        public String type() {
            return innerModel.type();
        }

        @Override
        public PrivateEndpoint privateEndpoint() {
            return privateEndpoint;
        }

        @Override
        public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
            privateLinkServiceConnectionState() {
            return privateLinkServiceConnectionState;
        }

        @Override
        public PrivateEndpointConnectionProvisioningState provisioningState() {
            return provisioningState;
        }
    }
}
class KubernetesClusterImpl extends GroupableResourceImpl< KubernetesCluster, ManagedClusterInner, KubernetesClusterImpl, ContainerServiceManager> implements KubernetesCluster, KubernetesCluster.Definition, KubernetesCluster.Update { private final ClientLogger logger = new ClientLogger(getClass()); private List<CredentialResult> adminKubeConfigs; private List<CredentialResult> userKubeConfigs; private final Map<Format, List<CredentialResult>> formatUserKubeConfigsMap = new ConcurrentHashMap<>(); protected KubernetesClusterImpl(String name, ManagedClusterInner innerObject, ContainerServiceManager manager) { super(name, innerObject, manager); if (this.innerModel().agentPoolProfiles() == null) { this.innerModel().withAgentPoolProfiles(new ArrayList<>()); } this.adminKubeConfigs = null; this.userKubeConfigs = null; } @Override public String provisioningState() { return this.innerModel().provisioningState(); } @Override public String dnsPrefix() { return this.innerModel().dnsPrefix(); } @Override public String fqdn() { return this.innerModel().fqdn(); } @Override public String version() { return this.innerModel().kubernetesVersion(); } @Override public List<CredentialResult> adminKubeConfigs() { if (this.adminKubeConfigs == null || this.adminKubeConfigs.size() == 0) { this.adminKubeConfigs = this.manager().kubernetesClusters().listAdminKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.adminKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs() { if (this.userKubeConfigs == null || this.userKubeConfigs.size() == 0) { this.userKubeConfigs = this.manager().kubernetesClusters().listUserKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.userKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs(Format format) { if (format == null) { return userKubeConfigs(); } return Collections.unmodifiableList( 
this.formatUserKubeConfigsMap.computeIfAbsent( format, key -> KubernetesClusterImpl.this .manager() .kubernetesClusters() .listUserKubeConfigContent( KubernetesClusterImpl.this.resourceGroupName(), KubernetesClusterImpl.this.name(), format )) ); } @Override public byte[] adminKubeConfigContent() { for (CredentialResult config : adminKubeConfigs()) { return config.value(); } return new byte[0]; } @Override public byte[] userKubeConfigContent() { for (CredentialResult config : userKubeConfigs()) { return config.value(); } return new byte[0]; } @Override @Override public String servicePrincipalClientId() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().clientId(); } else { return null; } } @Override public String servicePrincipalSecret() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().secret(); } else { return null; } } @Override public String linuxRootUsername() { if (this.innerModel().linuxProfile() != null) { return this.innerModel().linuxProfile().adminUsername(); } else { return null; } } @Override public String sshKey() { if (this.innerModel().linuxProfile() == null || this.innerModel().linuxProfile().ssh() == null || this.innerModel().linuxProfile().ssh().publicKeys() == null || this.innerModel().linuxProfile().ssh().publicKeys().size() == 0) { return null; } else { return this.innerModel().linuxProfile().ssh().publicKeys().get(0).keyData(); } } @Override public Map<String, KubernetesClusterAgentPool> agentPools() { Map<String, KubernetesClusterAgentPool> agentPoolMap = new HashMap<>(); if (this.innerModel().agentPoolProfiles() != null && this.innerModel().agentPoolProfiles().size() > 0) { for (ManagedClusterAgentPoolProfile agentPoolProfile : this.innerModel().agentPoolProfiles()) { agentPoolMap.put(agentPoolProfile.name(), new KubernetesClusterAgentPoolImpl(agentPoolProfile, this)); } } return 
Collections.unmodifiableMap(agentPoolMap); } @Override public ContainerServiceNetworkProfile networkProfile() { return this.innerModel().networkProfile(); } @Override public Map<String, ManagedClusterAddonProfile> addonProfiles() { return Collections.unmodifiableMap(this.innerModel().addonProfiles()); } @Override public String nodeResourceGroup() { return this.innerModel().nodeResourceGroup(); } @Override public boolean enableRBAC() { return this.innerModel().enableRbac(); } @Override public PowerState powerState() { return this.innerModel().powerState(); } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { String objectId = null; if (this.innerModel().identityProfile() != null) { UserAssignedIdentity identity = this.innerModel().identityProfile().get("kubeletidentity"); if (identity != null) { objectId = identity.objectId(); } } return objectId; } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return manager().kubernetesClusters().startAsync(this.resourceGroupName(), this.name()); } @Override public void stop() { this.stopAsync().block(); } @Override public Mono<Void> stopAsync() { return manager().kubernetesClusters().stopAsync(this.resourceGroupName(), this.name()); } private Mono<List<CredentialResult>> listAdminConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listAdminKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.adminKubeConfigs = kubeConfigs; return self.adminKubeConfigs; }); } private Mono<List<CredentialResult>> listUserConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listUserKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.userKubeConfigs = kubeConfigs; return self.userKubeConfigs; }); } @Override protected Mono<ManagedClusterInner> getInnerAsync() { final KubernetesClusterImpl self = this; final 
Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .getByResourceGroupAsync(this.resourceGroupName(), this.name()) .flatMap( managedClusterInner -> Flux .merge(adminConfig, userConfig) .last() .map( bytes -> { formatUserKubeConfigsMap.clear(); return managedClusterInner; })); } @Override public Mono<KubernetesCluster> createResourceAsync() { final KubernetesClusterImpl self = this; if (!this.isInCreateMode()) { this.innerModel().withServicePrincipalProfile(null); } final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .createOrUpdateAsync(self.resourceGroupName(), self.name(), self.innerModel()) .flatMap( inner -> Flux .merge(adminConfig, userConfig) .last() .map( bytes -> { self.setInner(inner); return self; })); } @Override public KubernetesClusterImpl withVersion(String kubernetesVersion) { this.innerModel().withKubernetesVersion(kubernetesVersion); return this; } @Override public KubernetesClusterImpl withDefaultVersion() { this.innerModel().withKubernetesVersion(""); return this; } @Override public KubernetesClusterImpl withRootUsername(String rootUserName) { if (this.innerModel().linuxProfile() == null) { this.innerModel().withLinuxProfile(new ContainerServiceLinuxProfile()); } this.innerModel().linuxProfile().withAdminUsername(rootUserName); return this; } @Override public KubernetesClusterImpl withSshKey(String sshKeyData) { this .innerModel() .linuxProfile() .withSsh( new ContainerServiceSshConfiguration().withPublicKeys(new ArrayList<ContainerServiceSshPublicKey>())); this.innerModel().linuxProfile().ssh().publicKeys().add( new ContainerServiceSshPublicKey().withKeyData(sshKeyData)); return this; } @Override public KubernetesClusterImpl 
withServicePrincipalClientId(String clientId) { this.innerModel().withServicePrincipalProfile( new ManagedClusterServicePrincipalProfile().withClientId(clientId)); return this; } @Override public KubernetesClusterImpl withSystemAssignedManagedServiceIdentity() { this.innerModel().withIdentity(new ManagedClusterIdentity().withType(ResourceIdentityType.SYSTEM_ASSIGNED)); return this; } @Override public KubernetesClusterImpl withServicePrincipalSecret(String secret) { this.innerModel().servicePrincipalProfile().withSecret(secret); return this; } @Override public KubernetesClusterImpl withDnsPrefix(String dnsPrefix) { this.innerModel().withDnsPrefix(dnsPrefix); return this; } @Override public KubernetesClusterAgentPoolImpl defineAgentPool(String name) { ManagedClusterAgentPoolProfile innerPoolProfile = new ManagedClusterAgentPoolProfile(); innerPoolProfile.withName(name); return new KubernetesClusterAgentPoolImpl(innerPoolProfile, this); } @Override public KubernetesClusterAgentPoolImpl updateAgentPool(String name) { for (ManagedClusterAgentPoolProfile agentPoolProfile : innerModel().agentPoolProfiles()) { if (agentPoolProfile.name().equals(name)) { return new KubernetesClusterAgentPoolImpl(agentPoolProfile, this); } } throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Cannot get agent pool named %s", name))); } @Override public Update withoutAgentPool(String name) { if (innerModel().agentPoolProfiles() != null) { innerModel().withAgentPoolProfiles( innerModel().agentPoolProfiles().stream() .filter(p -> !name.equals(p.name())) .collect(Collectors.toList())); this.addDependency(context -> manager().serviceClient().getAgentPools().deleteAsync(resourceGroupName(), name(), name) .then(context.voidMono())); } return this; } @Override public KubernetesCluster.DefinitionStages.NetworkProfileDefinitionStages.Blank< KubernetesCluster.DefinitionStages.WithCreate> defineNetworkProfile() { return new KubernetesClusterNetworkProfileImpl(this); } @Override 
public KubernetesClusterImpl withAddOnProfiles(Map<String, ManagedClusterAddonProfile> addOnProfileMap) { this.innerModel().withAddonProfiles(addOnProfileMap); return this; } @Override public KubernetesClusterImpl withNetworkProfile(ContainerServiceNetworkProfile networkProfile) { this.innerModel().withNetworkProfile(networkProfile); return this; } @Override public KubernetesClusterImpl withRBACEnabled() { this.innerModel().withEnableRbac(true); return this; } @Override public KubernetesClusterImpl withRBACDisabled() { this.innerModel().withEnableRbac(false); return this; } public KubernetesClusterImpl addNewAgentPool(KubernetesClusterAgentPoolImpl agentPool) { if (!isInCreateMode()) { this.addDependency(context -> manager().serviceClient().getAgentPools().createOrUpdateAsync( resourceGroupName(), name(), agentPool.name(), agentPool.getAgentPoolInner()) .then(context.voidMono())); } innerModel().agentPoolProfiles().add(agentPool.innerModel()); return this; } @Override public KubernetesClusterImpl withAutoScalerProfile(ManagedClusterPropertiesAutoScalerProfile autoScalerProfile) { this.innerModel().withAutoScalerProfile(autoScalerProfile); return this; } @Override public KubernetesClusterImpl enablePrivateCluster() { if (innerModel().apiServerAccessProfile() == null) { innerModel().withApiServerAccessProfile(new ManagedClusterApiServerAccessProfile()); } innerModel().apiServerAccessProfile().withEnablePrivateCluster(true); return this; } @Override public PagedIterable<PrivateLinkResource> listPrivateLinkResources() { return new PagedIterable<>(listPrivateLinkResourcesAsync()); } @Override public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() { Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateLinkResourceImpl::new) 
.collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } @Override public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() { return new PagedIterable<>(listPrivateEndpointConnectionsAsync()); } @Override public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() { Mono<Response<List<PrivateEndpointConnection>>> retList = this.manager().serviceClient() .getPrivateEndpointConnections() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateEndpointConnectionImpl::new) .collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } private static final class PrivateLinkResourceImpl implements PrivateLinkResource { private final PrivateLinkResourceInner innerModel; private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) { this.innerModel = innerModel; } @Override public String groupId() { return innerModel.groupId(); } @Override public List<String> requiredMemberNames() { return Collections.unmodifiableList(innerModel.requiredMembers()); } @Override public List<String> requiredDnsZoneNames() { return Collections.emptyList(); } } private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection { private final PrivateEndpointConnectionInner innerModel; private final PrivateEndpoint privateEndpoint; private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState; private final PrivateEndpointConnectionProvisioningState provisioningState; private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) { this.innerModel = innerModel; this.privateEndpoint = innerModel.privateEndpoint() == null ? 
null : new PrivateEndpoint(innerModel.privateEndpoint().id()); this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null ? null : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState( innerModel.privateLinkServiceConnectionState().status() == null ? null : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus .fromString(innerModel.privateLinkServiceConnectionState().status().toString()), innerModel.privateLinkServiceConnectionState().description(), ""); this.provisioningState = innerModel.provisioningState() == null ? null : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString()); } @Override public String id() { return innerModel.id(); } @Override public String name() { return innerModel.name(); } @Override public String type() { return innerModel.type(); } @Override public PrivateEndpoint privateEndpoint() { return privateEndpoint; } @Override public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState() { return privateLinkServiceConnectionState; } @Override public PrivateEndpointConnectionProvisioningState provisioningState() { return provisioningState; } } }
We should not use the ClientLogger here.
private static void validateLength(String namespace) { if (namespace.length() < 6 || namespace.length() > 50) { LOGGER.warning(LENGTH_ERROR); } }
LOGGER.warning(LENGTH_ERROR);
private static void validateLength(String namespace) { if (namespace.length() < 6 || namespace.length() > 50) { throw new IllegalArgumentException(LENGTH_ERROR); } }
class PropertiesValidator { private static final ClientLogger LOGGER = new ClientLogger(PropertiesValidator.class); public static final String LENGTH_ERROR = "The namespace must be between 6 and 50 characters long."; public static final String ILLEGAL_SYMBOL_ERROR = "The namespace can contain only letters, numbers, and hyphens."; public static final String START_SYMBOL_ERROR = "The namespace must start with a letter."; public static final String END_SYMBOL_ERROR = "The namespace must end with a letter or number."; public static void validateNamespace(String namespace) { validateLength(namespace); validateIllegalSymbol(namespace); validateStartingSymbol(namespace); validateEndingSymbol(namespace); } private static void validateIllegalSymbol(String namespace) { if (!namespace.matches("[a-z0-9A-Z-]+")) { LOGGER.warning(ILLEGAL_SYMBOL_ERROR); } } private static void validateStartingSymbol(String namespace) { if (!Character.isLetter(namespace.charAt(0))) { LOGGER.warning(START_SYMBOL_ERROR); } } private static void validateEndingSymbol(String namespace) { if (!Character.isLetterOrDigit(namespace.charAt(namespace.length() - 1))) { LOGGER.warning(END_SYMBOL_ERROR); } } }
class PropertiesValidator { public static final String LENGTH_ERROR = "The namespace must be between 6 and 50 characters long."; public static final String ILLEGAL_SYMBOL_ERROR = "The namespace can contain only letters, numbers, and hyphens."; public static final String START_SYMBOL_ERROR = "The namespace must start with a letter."; public static final String END_SYMBOL_ERROR = "The namespace must end with a letter or number."; public static void validateNamespace(String namespace) { validateLength(namespace); validateIllegalSymbol(namespace); validateStartingSymbol(namespace); validateEndingSymbol(namespace); } private static void validateIllegalSymbol(String namespace) { if (!namespace.matches("[a-z0-9A-Z-]+")) { throw new IllegalArgumentException(ILLEGAL_SYMBOL_ERROR); } } private static void validateStartingSymbol(String namespace) { if (!Character.isLetter(namespace.charAt(0))) { throw new IllegalArgumentException(START_SYMBOL_ERROR); } } private static void validateEndingSymbol(String namespace) { if (!Character.isLetterOrDigit(namespace.charAt(namespace.length() - 1))) { throw new IllegalArgumentException(END_SYMBOL_ERROR); } } }
How about we still throw the exception here, and in the configuration properties, we catch the exception and log it. Printing warning message is not what a validator should do.
private static void validateLength(String namespace) { if (namespace.length() < 6 || namespace.length() > 50) { LOGGER.warning(LENGTH_ERROR); } }
LOGGER.warning(LENGTH_ERROR);
private static void validateLength(String namespace) { if (namespace.length() < 6 || namespace.length() > 50) { throw new IllegalArgumentException(LENGTH_ERROR); } }
class PropertiesValidator { private static final ClientLogger LOGGER = new ClientLogger(PropertiesValidator.class); public static final String LENGTH_ERROR = "The namespace must be between 6 and 50 characters long."; public static final String ILLEGAL_SYMBOL_ERROR = "The namespace can contain only letters, numbers, and hyphens."; public static final String START_SYMBOL_ERROR = "The namespace must start with a letter."; public static final String END_SYMBOL_ERROR = "The namespace must end with a letter or number."; public static void validateNamespace(String namespace) { validateLength(namespace); validateIllegalSymbol(namespace); validateStartingSymbol(namespace); validateEndingSymbol(namespace); } private static void validateIllegalSymbol(String namespace) { if (!namespace.matches("[a-z0-9A-Z-]+")) { LOGGER.warning(ILLEGAL_SYMBOL_ERROR); } } private static void validateStartingSymbol(String namespace) { if (!Character.isLetter(namespace.charAt(0))) { LOGGER.warning(START_SYMBOL_ERROR); } } private static void validateEndingSymbol(String namespace) { if (!Character.isLetterOrDigit(namespace.charAt(namespace.length() - 1))) { LOGGER.warning(END_SYMBOL_ERROR); } } }
class PropertiesValidator { public static final String LENGTH_ERROR = "The namespace must be between 6 and 50 characters long."; public static final String ILLEGAL_SYMBOL_ERROR = "The namespace can contain only letters, numbers, and hyphens."; public static final String START_SYMBOL_ERROR = "The namespace must start with a letter."; public static final String END_SYMBOL_ERROR = "The namespace must end with a letter or number."; public static void validateNamespace(String namespace) { validateLength(namespace); validateIllegalSymbol(namespace); validateStartingSymbol(namespace); validateEndingSymbol(namespace); } private static void validateIllegalSymbol(String namespace) { if (!namespace.matches("[a-z0-9A-Z-]+")) { throw new IllegalArgumentException(ILLEGAL_SYMBOL_ERROR); } } private static void validateStartingSymbol(String namespace) { if (!Character.isLetter(namespace.charAt(0))) { throw new IllegalArgumentException(START_SYMBOL_ERROR); } } private static void validateEndingSymbol(String namespace) { if (!Character.isLetterOrDigit(namespace.charAt(namespace.length() - 1))) { throw new IllegalArgumentException(END_SYMBOL_ERROR); } } }
Should this new `String` be enforced to UTF-8? #Pending
private static String byteArrayToHex(byte[] bytes) { char[] hexChars = new char[bytes.length * 2]; for (int j = 0; j < bytes.length; j++) { int v = bytes[j] & 0xFF; hexChars[j * 2] = HEX_ARRAY[v >>> 4]; hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F]; } return new String(hexChars); }
return new String(hexChars);
private static String byteArrayToHex(byte[] bytes) { char[] hexChars = new char[bytes.length * 2]; for (int j = 0; j < bytes.length; j++) { int v = bytes[j] & 0xFF; hexChars[j * 2] = HEX_ARRAY[v >>> 4]; hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F]; } return new String(hexChars); }
class UtilsImpl { private static final String CLIENT_NAME; private static final String CLIENT_VERSION; private static final int HTTP_STATUS_CODE_NOT_FOUND; private static final int HTTP_STATUS_CODE_ACCEPTED; private static final String CONTINUATION_LINK_HEADER_NAME; private static final Pattern CONTINUATION_LINK_PATTERN; public static final String OCI_MANIFEST_MEDIA_TYPE; public static final String DOCKER_DIGEST_HEADER_NAME; public static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE; private static final ClientLogger LOGGER; static { LOGGER = new ClientLogger(UtilsImpl.class); Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties"); CLIENT_NAME = properties.getOrDefault("name", "UnknownName"); CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion"); HTTP_STATUS_CODE_NOT_FOUND = 404; HTTP_STATUS_CODE_ACCEPTED = 202; OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json"; DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest"; CONTINUATION_LINK_HEADER_NAME = "Link"; CONTINUATION_LINK_PATTERN = Pattern.compile("<(.+)>;.*"); CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry"; } private UtilsImpl() { } /** * This method builds the httpPipeline for the builders. * @param clientOptions The client options * @param logOptions http log options. * @param configuration configuration settings. * @param retryPolicy retry policy * @param retryOptions retry options * @param credential credentials. * @param perCallPolicies per call policies. * @param perRetryPolicies per retry policies. * @param httpClient http client * @param endpoint endpoint to be called * @param serviceVersion the service api version being targeted by the client. * @return returns the httpPipeline to be consumed by the builders. 
*/ public static HttpPipeline buildHttpPipeline( ClientOptions clientOptions, HttpLogOptions logOptions, Configuration configuration, RetryPolicy retryPolicy, RetryOptions retryOptions, TokenCredential credential, ContainerRegistryAudience audience, List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies, HttpClient httpClient, String endpoint, ContainerRegistryServiceVersion serviceVersion, ClientLogger logger) { ArrayList<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add( new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions)); policies.add(new CookiePolicy()); policies.add(new AddDatePolicy()); policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions); if (credential == null) { logger.verbose("Credentials are null, enabling anonymous access"); } ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies); credentialPolicies.add(loggingPolicy); if (audience == null) { audience = ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD; } ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService( credential, audience, endpoint, serviceVersion, new HttpPipelineBuilder() .policies(credentialPolicies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(), JacksonAdapter.createDefaultSerializerAdapter()); ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService); policies.add(credentialsPolicy); policies.add(loggingPolicy); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); 
return httpPipeline; } private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) { ArrayList<HttpPipelinePolicy> clonedPolicy = new ArrayList<>(); for (HttpPipelinePolicy policy:policies) { clonedPolicy.add(policy); } return clonedPolicy; } /** * This method computes the digest for the buffer content. * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build. * @param buffer The buffer containing the image bytes. * @return SHA-256 digest for the given buffer. */ public static String computeDigest(ByteBuffer buffer) { ByteBuffer readOnlyBuffer = buffer.asReadOnlyBuffer(); try { MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(readOnlyBuffer); byte[] digest = md.digest(); return "sha256:" + byteArrayToHex(digest); } catch (NoSuchAlgorithmException e) { LOGGER.error("SHA-256 conversion failed with" + e.getMessage()); throw new RuntimeException(e); } } private static final char[] HEX_ARRAY = "0123456789abcdef".toCharArray(); /** * Delete operation should be idempotent. * And so should result in a success in case the service response is 400 : Not found. * @param responseT The response object. * @param <T> The encapsulating value. * @return The transformed response object. */ public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) { if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) { return getAcceptedDeleteResponse(responseT, responseT.getStatusCode()); } return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED); } static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) { return Mono.just(new ResponseBase<String, Void>( responseT.getRequest(), statusCode, responseT.getHeaders(), null, null)); } /** * This method converts the API response codes into well known exceptions. * @param exception The exception returned by the rest client. 
* @return The exception returned by the public methods. */ public static Throwable mapException(Throwable exception) { AcrErrorsException acrException = null; if (exception instanceof AcrErrorsException) { acrException = ((AcrErrorsException) exception); } else if (exception instanceof RuntimeException) { RuntimeException runtimeException = (RuntimeException) exception; Throwable throwable = runtimeException.getCause(); if (throwable instanceof AcrErrorsException) { acrException = (AcrErrorsException) throwable; } } if (acrException == null) { return exception; } final HttpResponse errorHttpResponse = acrException.getResponse(); final int statusCode = errorHttpResponse.getStatusCode(); final String errorDetail = acrException.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, acrException.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, acrException.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, acrException.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, acrException.getResponse(), exception); default: return new HttpResponseException(errorDetail, acrException.getResponse(), exception); } } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * @param listResponse response that is parsed. * @param <T> the model type that is being operated on. * @return paged response with the correct continuation token. */ public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) { return getPagedResponseWithContinuationToken(listResponse, values -> values); } /** * This method parses the response to get the continuation token used to make the next pagination call. 
* The continuation token is returned by the service in the form of a header and not as a nextLink field. * * <p> * Per the Docker v2 HTTP API spec, the Link header is an RFC5988 * compliant rel='next' with URL to next result set, if available. * See: https: * * The URI reference can be obtained from link-value as follows: * Link = "Link" ":" * link-value = "<" URI-Reference ">" * (";" link-param ) * See: https: * </p> * @param listResponse response that is parsed. * @param mapperFunction the function that maps the rest api response into the public model exposed by the client. * @param <T> The model type returned by the rest client. * @param <R> The model type returned by the public client. * @return paged response with the correct continuation token. */ public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse, Function<List<R>, List<T>> mapperFunction) { Objects.requireNonNull(mapperFunction); String continuationLink = null; HttpHeaders headers = listResponse.getHeaders(); if (headers != null) { String continuationLinkHeader = headers.getValue(CONTINUATION_LINK_HEADER_NAME); if (!CoreUtils.isNullOrEmpty(continuationLinkHeader)) { Matcher matcher = CONTINUATION_LINK_PATTERN.matcher(continuationLinkHeader); if (matcher.matches()) { if (matcher.groupCount() == 1) { continuationLink = matcher.group(1); } } } } List<T> values = mapperFunction.apply(listResponse.getValue()); return new PagedResponseBase<String, T>( listResponse.getRequest(), listResponse.getStatusCode(), listResponse.getHeaders(), values, continuationLink, null ); } }
/**
 * Internal utility methods shared by the Container Registry clients.
 * <p>All members are static; the class is never instantiated.</p>
 */
class UtilsImpl {
    private static final String CLIENT_NAME;
    private static final String CLIENT_VERSION;
    private static final int HTTP_STATUS_CODE_NOT_FOUND;
    private static final int HTTP_STATUS_CODE_ACCEPTED;
    private static final String CONTINUATION_LINK_HEADER_NAME;
    private static final Pattern CONTINUATION_LINK_PATTERN;
    public static final String OCI_MANIFEST_MEDIA_TYPE;
    public static final String DOCKER_DIGEST_HEADER_NAME;
    public static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE;
    private static final ClientLogger LOGGER;

    static {
        LOGGER = new ClientLogger(UtilsImpl.class);
        // Client name/version are read from the generated properties file; they feed the User-Agent policy below.
        Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties");
        CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
        CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
        HTTP_STATUS_CODE_NOT_FOUND = 404;
        HTTP_STATUS_CODE_ACCEPTED = 202;
        OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json";
        DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest";
        CONTINUATION_LINK_HEADER_NAME = "Link";
        // Captures the URI-Reference between the angle brackets of an RFC 5988 Link header value.
        CONTINUATION_LINK_PATTERN = Pattern.compile("<(.+)>;.*");
        CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry";
    }

    // Utility class; not instantiable.
    private UtilsImpl() {
    }

    /**
     * This method builds the httpPipeline for the builders.
     * @param clientOptions The client options
     * @param logOptions http log options.
     * @param configuration configuration settings.
     * @param retryPolicy retry policy
     * @param retryOptions retry options
     * @param credential credentials; may be {@code null}, which enables anonymous access.
     * @param audience the audience the registry token is requested for; defaults to the
     * Azure Resource Manager public-cloud audience when {@code null}.
     * @param perCallPolicies per call policies.
     * @param perRetryPolicies per retry policies.
     * @param httpClient http client
     * @param endpoint endpoint to be called
     * @param serviceVersion the service api version being targeted by the client.
     * @param logger logger used to emit builder diagnostics.
     * @return returns the httpPipeline to be consumed by the builders.
     */
    public static HttpPipeline buildHttpPipeline(
        ClientOptions clientOptions,
        HttpLogOptions logOptions,
        Configuration configuration,
        RetryPolicy retryPolicy,
        RetryOptions retryOptions,
        TokenCredential credential,
        ContainerRegistryAudience audience,
        List<HttpPipelinePolicy> perCallPolicies,
        List<HttpPipelinePolicy> perRetryPolicies,
        HttpClient httpClient,
        String endpoint,
        ContainerRegistryServiceVersion serviceVersion,
        ClientLogger logger) {
        ArrayList<HttpPipelinePolicy> policies = new ArrayList<>();
        policies.add(
            new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION,
                configuration));
        policies.add(new RequestIdPolicy());

        policies.addAll(perCallPolicies);
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
        policies.add(new CookiePolicy());
        policies.add(new AddDatePolicy());

        policies.addAll(perRetryPolicies);
        HttpPolicyProviders.addAfterRetryPolicies(policies);

        HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions);
        if (credential == null) {
            logger.verbose("Credentials are null, enabling anonymous access");
        }

        // The token service gets its own copy of the pipeline (without the credentials policy itself)
        // so it can call the registry's token endpoint while the main pipeline is being built.
        ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies);
        credentialPolicies.add(loggingPolicy);

        if (audience == null) {
            audience = ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD;
        }

        ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService(
            credential,
            audience,
            endpoint,
            serviceVersion,
            new HttpPipelineBuilder()
                .policies(credentialPolicies.toArray(new HttpPipelinePolicy[0]))
                .httpClient(httpClient)
                .build(),
            JacksonAdapter.createDefaultSerializerAdapter());

        ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService);
        policies.add(credentialsPolicy);
        policies.add(loggingPolicy);

        HttpPipeline httpPipeline = new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build();
        return httpPipeline;
    }

    // Shallow copy of the policy list so the token pipeline can diverge from the client pipeline.
    private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) {
        ArrayList<HttpPipelinePolicy> clonedPolicy = new ArrayList<>();
        for (HttpPipelinePolicy policy : policies) {
            clonedPolicy.add(policy);
        }
        return clonedPolicy;
    }

    /**
     * This method computes the digest for the buffer content.
     * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build.
     * @param buffer The buffer containing the image bytes.
     * @return SHA-256 digest for the given buffer.
     */
    public static String computeDigest(ByteBuffer buffer) {
        // Work on a read-only duplicate so the caller's buffer position is left untouched.
        ByteBuffer readOnlyBuffer = buffer.asReadOnlyBuffer();

        try {
            MessageDigest md = MessageDigest.getInstance("SHA-256");
            md.update(readOnlyBuffer);
            byte[] digest = md.digest();
            // byteArrayToHex is expected to be declared elsewhere in this class.
            return "sha256:" + byteArrayToHex(digest);
        } catch (NoSuchAlgorithmException e) {
            // SHA-256 is mandated for every compliant JVM, so this is effectively unreachable.
            LOGGER.error("SHA-256 conversion failed with" + e);
            throw new RuntimeException(e);
        }
    }

    // Hex alphabet used when rendering digests.
    private static final char[] HEX_ARRAY = "0123456789abcdef".toCharArray();

    /**
     * Delete operation should be idempotent.
     * And so should result in a success in case the service response is 404 : Not Found.
     * (The status checked below is {@code HTTP_STATUS_CODE_NOT_FOUND} = 404.)
     * @param responseT The response object.
     * @param <T> The encapsulating value.
     * @return The transformed response object.
     */
    public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) {
        if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) {
            // Pass any non-404 status through unchanged.
            return getAcceptedDeleteResponse(responseT, responseT.getStatusCode());
        }
        // Map 404 to 202 (Accepted) so deleting an already-deleted resource still succeeds.
        return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED);
    }

    // Re-wraps the response with the overridden status code and an empty body.
    static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) {
        return Mono.just(new SimpleResponse<Void>(
            responseT.getRequest(),
            statusCode,
            responseT.getHeaders(),
            null));
    }

    /**
     * This method converts the API response codes into well known exceptions.
     * @param exception The exception returned by the rest client.
     * @return The exception returned by the public methods.
     */
    public static Throwable mapException(Throwable exception) {
        AcrErrorsException acrException = null;

        if (exception instanceof AcrErrorsException) {
            acrException = ((AcrErrorsException) exception);
        } else if (exception instanceof RuntimeException) {
            // The service error may arrive wrapped one level deep; unwrap the cause.
            RuntimeException runtimeException = (RuntimeException) exception;
            Throwable throwable = runtimeException.getCause();
            if (throwable instanceof AcrErrorsException) {
                acrException = (AcrErrorsException) throwable;
            }
        }

        if (acrException == null) {
            // Not a service error; return the original exception untouched.
            return exception;
        }

        final HttpResponse errorHttpResponse = acrException.getResponse();
        final int statusCode = errorHttpResponse.getStatusCode();
        final String errorDetail = acrException.getMessage();

        switch (statusCode) {
            case 401:
                return new ClientAuthenticationException(errorDetail, acrException.getResponse(), exception);
            case 404:
                return new ResourceNotFoundException(errorDetail, acrException.getResponse(), exception);
            case 409:
                return new ResourceExistsException(errorDetail, acrException.getResponse(), exception);
            case 412:
                return new ResourceModifiedException(errorDetail, acrException.getResponse(), exception);
            default:
                return new HttpResponseException(errorDetail, acrException.getResponse(), exception);
        }
    }

    /**
     * This method parses the response to get the continuation token used to make the next pagination call.
     * The continuation token is returned by the service in the form of a header and not as a nextLink field.
     * @param listResponse response that is parsed.
     * @param <T> the model type that is being operated on.
     * @return paged response with the correct continuation token.
     */
    public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) {
        return getPagedResponseWithContinuationToken(listResponse, values -> values);
    }

    /**
     * This method parses the response to get the continuation token used to make the next pagination call.
     * The continuation token is returned by the service in the form of a header and not as a nextLink field.
     *
     * <p>
     * Per the Docker v2 HTTP API spec, the Link header is an RFC 5988
     * compliant rel='next' with the URL to the next result set, if available.
     * The URI reference is obtained from the link-value as follows:
     * {@code Link = "Link" ":" link-value} and
     * {@code link-value = "<" URI-Reference ">" (";" link-param)}.
     * </p>
     * @param listResponse response that is parsed.
     * @param mapperFunction the function that maps the rest api response into the public model exposed by the client.
     * @param <T> The model type returned by the public client.
     * @param <R> The model type returned by the rest client.
     * @return paged response with the correct continuation token.
     */
    public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse,
        Function<List<R>, List<T>> mapperFunction) {
        Objects.requireNonNull(mapperFunction);

        String continuationLink = null;
        HttpHeaders headers = listResponse.getHeaders();
        if (headers != null) {
            String continuationLinkHeader = headers.getValue(CONTINUATION_LINK_HEADER_NAME);
            if (!CoreUtils.isNullOrEmpty(continuationLinkHeader)) {
                Matcher matcher = CONTINUATION_LINK_PATTERN.matcher(continuationLinkHeader);
                if (matcher.matches()) {
                    if (matcher.groupCount() == 1) {
                        // Group 1 is the raw URI-Reference between the angle brackets.
                        continuationLink = matcher.group(1);
                    }
                }
            }
        }

        List<T> values = mapperFunction.apply(listResponse.getValue());
        return new PagedResponseBase<String, T>(
            listResponse.getRequest(),
            listResponse.getStatusCode(),
            listResponse.getHeaders(),
            values,
            continuationLink,
            null
        );
    }
}
```suggestion LOGGER.error("SHA-256 conversion failed.", e); ``` I'd let the logger handle how the exception should be logged #Resolved
/**
 * Computes the Docker content digest for the given buffer.
 * Docker digest is a SHA-256 hash of the docker image content and is deterministic based on the image build.
 *
 * @param buffer The buffer containing the image bytes.
 * @return SHA-256 digest for the given buffer, prefixed with {@code "sha256:"}.
 * @throws RuntimeException if the SHA-256 algorithm is unavailable (effectively unreachable:
 * every compliant JVM is required to provide SHA-256).
 */
public static String computeDigest(ByteBuffer buffer) {
    // Work on a read-only duplicate so the caller's buffer position is left untouched.
    ByteBuffer readOnlyBuffer = buffer.asReadOnlyBuffer();

    try {
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        md.update(readOnlyBuffer);
        byte[] digest = md.digest();
        return "sha256:" + byteArrayToHex(digest);
    } catch (NoSuchAlgorithmException e) {
        // Pass the throwable to the logger instead of concatenating e.getMessage() into the
        // message: the logger then records the full exception (type + stack trace), not just text.
        LOGGER.error("SHA-256 conversion failed.", e);
        throw new RuntimeException(e);
    }
}
LOGGER.error("SHA-256 conversion failed with" + e.getMessage());
/**
 * Computes the Docker content digest for the given buffer.
 * Docker digest is a SHA-256 hash of the docker image content and is deterministic based on the image build.
 *
 * @param buffer The buffer containing the image bytes.
 * @return SHA-256 digest for the given buffer, prefixed with {@code "sha256:"}.
 * @throws RuntimeException if the SHA-256 algorithm is unavailable (effectively unreachable:
 * every compliant JVM is required to provide SHA-256).
 */
public static String computeDigest(ByteBuffer buffer) {
    // Work on a read-only duplicate so the caller's buffer position is left untouched.
    ByteBuffer readOnlyBuffer = buffer.asReadOnlyBuffer();

    try {
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        md.update(readOnlyBuffer);
        byte[] digest = md.digest();
        return "sha256:" + byteArrayToHex(digest);
    } catch (NoSuchAlgorithmException e) {
        // Let the logger handle the exception formatting rather than concatenating it into the
        // message string; this preserves the stack trace in the log output.
        LOGGER.error("SHA-256 conversion failed.", e);
        throw new RuntimeException(e);
    }
}
/**
 * Internal utility methods shared by the Container Registry clients.
 * <p>All members are static; the class is never instantiated.</p>
 */
class UtilsImpl {
    private static final String CLIENT_NAME;
    private static final String CLIENT_VERSION;
    private static final int HTTP_STATUS_CODE_NOT_FOUND;
    private static final int HTTP_STATUS_CODE_ACCEPTED;
    private static final String CONTINUATION_LINK_HEADER_NAME;
    private static final Pattern CONTINUATION_LINK_PATTERN;
    public static final String OCI_MANIFEST_MEDIA_TYPE;
    public static final String DOCKER_DIGEST_HEADER_NAME;
    public static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE;
    private static final ClientLogger LOGGER;

    static {
        LOGGER = new ClientLogger(UtilsImpl.class);
        // Client name/version are read from the generated properties file; they feed the User-Agent policy below.
        Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties");
        CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
        CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
        HTTP_STATUS_CODE_NOT_FOUND = 404;
        HTTP_STATUS_CODE_ACCEPTED = 202;
        OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json";
        DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest";
        CONTINUATION_LINK_HEADER_NAME = "Link";
        // Captures the URI-Reference between the angle brackets of an RFC 5988 Link header value.
        CONTINUATION_LINK_PATTERN = Pattern.compile("<(.+)>;.*");
        CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry";
    }

    // Utility class; not instantiable.
    private UtilsImpl() {
    }

    /**
     * This method builds the httpPipeline for the builders.
     * @param clientOptions The client options
     * @param logOptions http log options.
     * @param configuration configuration settings.
     * @param retryPolicy retry policy
     * @param retryOptions retry options
     * @param credential credentials; may be {@code null}, which enables anonymous access.
     * @param audience the audience the registry token is requested for; defaults to the
     * Azure Resource Manager public-cloud audience when {@code null}.
     * @param perCallPolicies per call policies.
     * @param perRetryPolicies per retry policies.
     * @param httpClient http client
     * @param endpoint endpoint to be called
     * @param serviceVersion the service api version being targeted by the client.
     * @param logger logger used to emit builder diagnostics.
     * @return returns the httpPipeline to be consumed by the builders.
     */
    public static HttpPipeline buildHttpPipeline(
        ClientOptions clientOptions,
        HttpLogOptions logOptions,
        Configuration configuration,
        RetryPolicy retryPolicy,
        RetryOptions retryOptions,
        TokenCredential credential,
        ContainerRegistryAudience audience,
        List<HttpPipelinePolicy> perCallPolicies,
        List<HttpPipelinePolicy> perRetryPolicies,
        HttpClient httpClient,
        String endpoint,
        ContainerRegistryServiceVersion serviceVersion,
        ClientLogger logger) {
        ArrayList<HttpPipelinePolicy> policies = new ArrayList<>();
        policies.add(
            new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION,
                configuration));
        policies.add(new RequestIdPolicy());

        policies.addAll(perCallPolicies);
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
        policies.add(new CookiePolicy());
        policies.add(new AddDatePolicy());

        policies.addAll(perRetryPolicies);
        HttpPolicyProviders.addAfterRetryPolicies(policies);

        HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions);
        if (credential == null) {
            logger.verbose("Credentials are null, enabling anonymous access");
        }

        // The token service gets its own copy of the pipeline (without the credentials policy itself)
        // so it can call the registry's token endpoint while the main pipeline is being built.
        ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies);
        credentialPolicies.add(loggingPolicy);

        if (audience == null) {
            audience = ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD;
        }

        ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService(
            credential,
            audience,
            endpoint,
            serviceVersion,
            new HttpPipelineBuilder()
                .policies(credentialPolicies.toArray(new HttpPipelinePolicy[0]))
                .httpClient(httpClient)
                .build(),
            JacksonAdapter.createDefaultSerializerAdapter());

        ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService);
        policies.add(credentialsPolicy);
        policies.add(loggingPolicy);

        HttpPipeline httpPipeline = new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build();
        return httpPipeline;
    }

    // Shallow copy of the policy list so the token pipeline can diverge from the client pipeline.
    private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) {
        ArrayList<HttpPipelinePolicy> clonedPolicy = new ArrayList<>();
        for (HttpPipelinePolicy policy : policies) {
            clonedPolicy.add(policy);
        }
        return clonedPolicy;
    }

    // Hex alphabet used by byteArrayToHex below when rendering digests.
    // NOTE(review): the javadoc that previously sat here described a digest computation
    // ("This method computes the digest for the buffer content...") that no longer lives
    // in this class; it has been replaced with documentation matching the actual members.
    private static final char[] HEX_ARRAY = "0123456789abcdef".toCharArray();

    /**
     * Converts the bytes to their lowercase hexadecimal representation.
     * @param bytes the bytes to encode.
     * @return lowercase hex string, two characters per input byte.
     */
    private static String byteArrayToHex(byte[] bytes) {
        char[] hexChars = new char[bytes.length * 2];
        for (int j = 0; j < bytes.length; j++) {
            int v = bytes[j] & 0xFF;
            hexChars[j * 2] = HEX_ARRAY[v >>> 4];
            hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F];
        }
        return new String(hexChars);
    }

    /**
     * Delete operation should be idempotent.
     * And so should result in a success in case the service response is 404 : Not Found.
     * (The status checked below is {@code HTTP_STATUS_CODE_NOT_FOUND} = 404.)
     * @param responseT The response object.
     * @param <T> The encapsulating value.
     * @return The transformed response object.
     */
    public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) {
        if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) {
            // Pass any non-404 status through unchanged.
            return getAcceptedDeleteResponse(responseT, responseT.getStatusCode());
        }
        // Map 404 to 202 (Accepted) so deleting an already-deleted resource still succeeds.
        return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED);
    }

    // Re-wraps the response with the overridden status code and an empty body.
    static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) {
        return Mono.just(new ResponseBase<String, Void>(
            responseT.getRequest(),
            statusCode,
            responseT.getHeaders(),
            null,
            null));
    }

    /**
     * This method converts the API response codes into well known exceptions.
     * @param exception The exception returned by the rest client.
     * @return The exception returned by the public methods.
     */
    public static Throwable mapException(Throwable exception) {
        AcrErrorsException acrException = null;

        if (exception instanceof AcrErrorsException) {
            acrException = ((AcrErrorsException) exception);
        } else if (exception instanceof RuntimeException) {
            // The service error may arrive wrapped one level deep; unwrap the cause.
            RuntimeException runtimeException = (RuntimeException) exception;
            Throwable throwable = runtimeException.getCause();
            if (throwable instanceof AcrErrorsException) {
                acrException = (AcrErrorsException) throwable;
            }
        }

        if (acrException == null) {
            // Not a service error; return the original exception untouched.
            return exception;
        }

        final HttpResponse errorHttpResponse = acrException.getResponse();
        final int statusCode = errorHttpResponse.getStatusCode();
        final String errorDetail = acrException.getMessage();

        switch (statusCode) {
            case 401:
                return new ClientAuthenticationException(errorDetail, acrException.getResponse(), exception);
            case 404:
                return new ResourceNotFoundException(errorDetail, acrException.getResponse(), exception);
            case 409:
                return new ResourceExistsException(errorDetail, acrException.getResponse(), exception);
            case 412:
                return new ResourceModifiedException(errorDetail, acrException.getResponse(), exception);
            default:
                return new HttpResponseException(errorDetail, acrException.getResponse(), exception);
        }
    }

    /**
     * This method parses the response to get the continuation token used to make the next pagination call.
     * The continuation token is returned by the service in the form of a header and not as a nextLink field.
     * @param listResponse response that is parsed.
     * @param <T> the model type that is being operated on.
     * @return paged response with the correct continuation token.
     */
    public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) {
        return getPagedResponseWithContinuationToken(listResponse, values -> values);
    }

    /**
     * This method parses the response to get the continuation token used to make the next pagination call.
     * The continuation token is returned by the service in the form of a header and not as a nextLink field.
     *
     * <p>
     * Per the Docker v2 HTTP API spec, the Link header is an RFC 5988
     * compliant rel='next' with the URL to the next result set, if available.
     * The URI reference is obtained from the link-value as follows:
     * {@code Link = "Link" ":" link-value} and
     * {@code link-value = "<" URI-Reference ">" (";" link-param)}.
     * </p>
     * @param listResponse response that is parsed.
     * @param mapperFunction the function that maps the rest api response into the public model exposed by the client.
     * @param <T> The model type returned by the public client.
     * @param <R> The model type returned by the rest client.
     * @return paged response with the correct continuation token.
     */
    public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse,
        Function<List<R>, List<T>> mapperFunction) {
        Objects.requireNonNull(mapperFunction);

        String continuationLink = null;
        HttpHeaders headers = listResponse.getHeaders();
        if (headers != null) {
            String continuationLinkHeader = headers.getValue(CONTINUATION_LINK_HEADER_NAME);
            if (!CoreUtils.isNullOrEmpty(continuationLinkHeader)) {
                Matcher matcher = CONTINUATION_LINK_PATTERN.matcher(continuationLinkHeader);
                if (matcher.matches()) {
                    if (matcher.groupCount() == 1) {
                        // Group 1 is the raw URI-Reference between the angle brackets.
                        continuationLink = matcher.group(1);
                    }
                }
            }
        }

        List<T> values = mapperFunction.apply(listResponse.getValue());
        return new PagedResponseBase<String, T>(
            listResponse.getRequest(),
            listResponse.getStatusCode(),
            listResponse.getHeaders(),
            values,
            continuationLink,
            null
        );
    }
}
/**
 * Internal utility methods shared by the Container Registry clients.
 * <p>All members are static; the class is never instantiated.</p>
 */
class UtilsImpl {
    private static final String CLIENT_NAME;
    private static final String CLIENT_VERSION;
    private static final int HTTP_STATUS_CODE_NOT_FOUND;
    private static final int HTTP_STATUS_CODE_ACCEPTED;
    private static final String CONTINUATION_LINK_HEADER_NAME;
    private static final Pattern CONTINUATION_LINK_PATTERN;
    public static final String OCI_MANIFEST_MEDIA_TYPE;
    public static final String DOCKER_DIGEST_HEADER_NAME;
    public static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE;
    private static final ClientLogger LOGGER;

    static {
        LOGGER = new ClientLogger(UtilsImpl.class);
        // Client name/version are read from the generated properties file; they feed the User-Agent policy below.
        Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties");
        CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
        CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
        HTTP_STATUS_CODE_NOT_FOUND = 404;
        HTTP_STATUS_CODE_ACCEPTED = 202;
        OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json";
        DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest";
        CONTINUATION_LINK_HEADER_NAME = "Link";
        // Captures the URI-Reference between the angle brackets of an RFC 5988 Link header value.
        CONTINUATION_LINK_PATTERN = Pattern.compile("<(.+)>;.*");
        CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry";
    }

    // Utility class; not instantiable.
    private UtilsImpl() {
    }

    /**
     * This method builds the httpPipeline for the builders.
     * @param clientOptions The client options
     * @param logOptions http log options.
     * @param configuration configuration settings.
     * @param retryPolicy retry policy
     * @param retryOptions retry options
     * @param credential credentials; may be {@code null}, which enables anonymous access.
     * @param audience the audience the registry token is requested for; defaults to the
     * Azure Resource Manager public-cloud audience when {@code null}.
     * @param perCallPolicies per call policies.
     * @param perRetryPolicies per retry policies.
     * @param httpClient http client
     * @param endpoint endpoint to be called
     * @param serviceVersion the service api version being targeted by the client.
     * @param logger logger used to emit builder diagnostics.
     * @return returns the httpPipeline to be consumed by the builders.
     */
    public static HttpPipeline buildHttpPipeline(
        ClientOptions clientOptions,
        HttpLogOptions logOptions,
        Configuration configuration,
        RetryPolicy retryPolicy,
        RetryOptions retryOptions,
        TokenCredential credential,
        ContainerRegistryAudience audience,
        List<HttpPipelinePolicy> perCallPolicies,
        List<HttpPipelinePolicy> perRetryPolicies,
        HttpClient httpClient,
        String endpoint,
        ContainerRegistryServiceVersion serviceVersion,
        ClientLogger logger) {
        ArrayList<HttpPipelinePolicy> policies = new ArrayList<>();
        policies.add(
            new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION,
                configuration));
        policies.add(new RequestIdPolicy());

        policies.addAll(perCallPolicies);
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
        policies.add(new CookiePolicy());
        policies.add(new AddDatePolicy());

        policies.addAll(perRetryPolicies);
        HttpPolicyProviders.addAfterRetryPolicies(policies);

        HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions);
        if (credential == null) {
            logger.verbose("Credentials are null, enabling anonymous access");
        }

        // The token service gets its own copy of the pipeline (without the credentials policy itself)
        // so it can call the registry's token endpoint while the main pipeline is being built.
        ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies);
        credentialPolicies.add(loggingPolicy);

        if (audience == null) {
            audience = ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD;
        }

        ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService(
            credential,
            audience,
            endpoint,
            serviceVersion,
            new HttpPipelineBuilder()
                .policies(credentialPolicies.toArray(new HttpPipelinePolicy[0]))
                .httpClient(httpClient)
                .build(),
            JacksonAdapter.createDefaultSerializerAdapter());

        ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService);
        policies.add(credentialsPolicy);
        policies.add(loggingPolicy);

        HttpPipeline httpPipeline = new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build();
        return httpPipeline;
    }

    // Shallow copy of the policy list so the token pipeline can diverge from the client pipeline.
    private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) {
        ArrayList<HttpPipelinePolicy> clonedPolicy = new ArrayList<>();
        for (HttpPipelinePolicy policy : policies) {
            clonedPolicy.add(policy);
        }
        return clonedPolicy;
    }

    // Hex alphabet used by byteArrayToHex below when rendering digests.
    // NOTE(review): the javadoc that previously sat here described a digest computation
    // ("This method computes the digest for the buffer content...") that no longer lives
    // in this class; it has been replaced with documentation matching the actual members.
    private static final char[] HEX_ARRAY = "0123456789abcdef".toCharArray();

    /**
     * Converts the bytes to their lowercase hexadecimal representation.
     * @param bytes the bytes to encode.
     * @return lowercase hex string, two characters per input byte.
     */
    private static String byteArrayToHex(byte[] bytes) {
        char[] hexChars = new char[bytes.length * 2];
        for (int j = 0; j < bytes.length; j++) {
            int v = bytes[j] & 0xFF;
            hexChars[j * 2] = HEX_ARRAY[v >>> 4];
            hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F];
        }
        return new String(hexChars);
    }

    /**
     * Delete operation should be idempotent.
     * And so should result in a success in case the service response is 404 : Not Found.
     * (The status checked below is {@code HTTP_STATUS_CODE_NOT_FOUND} = 404.)
     * @param responseT The response object.
     * @param <T> The encapsulating value.
     * @return The transformed response object.
     */
    public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) {
        if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) {
            // Pass any non-404 status through unchanged.
            return getAcceptedDeleteResponse(responseT, responseT.getStatusCode());
        }
        // Map 404 to 202 (Accepted) so deleting an already-deleted resource still succeeds.
        return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED);
    }

    // Re-wraps the response with the overridden status code and an empty body.
    static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) {
        return Mono.just(new SimpleResponse<Void>(
            responseT.getRequest(),
            statusCode,
            responseT.getHeaders(),
            null));
    }

    /**
     * This method converts the API response codes into well known exceptions.
     * @param exception The exception returned by the rest client.
     * @return The exception returned by the public methods.
     */
    public static Throwable mapException(Throwable exception) {
        AcrErrorsException acrException = null;

        if (exception instanceof AcrErrorsException) {
            acrException = ((AcrErrorsException) exception);
        } else if (exception instanceof RuntimeException) {
            // The service error may arrive wrapped one level deep; unwrap the cause.
            RuntimeException runtimeException = (RuntimeException) exception;
            Throwable throwable = runtimeException.getCause();
            if (throwable instanceof AcrErrorsException) {
                acrException = (AcrErrorsException) throwable;
            }
        }

        if (acrException == null) {
            // Not a service error; return the original exception untouched.
            return exception;
        }

        final HttpResponse errorHttpResponse = acrException.getResponse();
        final int statusCode = errorHttpResponse.getStatusCode();
        final String errorDetail = acrException.getMessage();

        switch (statusCode) {
            case 401:
                return new ClientAuthenticationException(errorDetail, acrException.getResponse(), exception);
            case 404:
                return new ResourceNotFoundException(errorDetail, acrException.getResponse(), exception);
            case 409:
                return new ResourceExistsException(errorDetail, acrException.getResponse(), exception);
            case 412:
                return new ResourceModifiedException(errorDetail, acrException.getResponse(), exception);
            default:
                return new HttpResponseException(errorDetail, acrException.getResponse(), exception);
        }
    }

    /**
     * This method parses the response to get the continuation token used to make the next pagination call.
     * The continuation token is returned by the service in the form of a header and not as a nextLink field.
     * @param listResponse response that is parsed.
     * @param <T> the model type that is being operated on.
     * @return paged response with the correct continuation token.
     */
    public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) {
        return getPagedResponseWithContinuationToken(listResponse, values -> values);
    }

    /**
     * This method parses the response to get the continuation token used to make the next pagination call.
     * The continuation token is returned by the service in the form of a header and not as a nextLink field.
     *
     * <p>
     * Per the Docker v2 HTTP API spec, the Link header is an RFC 5988
     * compliant rel='next' with the URL to the next result set, if available.
     * The URI reference is obtained from the link-value as follows:
     * {@code Link = "Link" ":" link-value} and
     * {@code link-value = "<" URI-Reference ">" (";" link-param)}.
     * </p>
     * @param listResponse response that is parsed.
     * @param mapperFunction the function that maps the rest api response into the public model exposed by the client.
     * @param <T> The model type returned by the public client.
     * @param <R> The model type returned by the rest client.
     * @return paged response with the correct continuation token.
     */
    public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse,
        Function<List<R>, List<T>> mapperFunction) {
        Objects.requireNonNull(mapperFunction);

        String continuationLink = null;
        HttpHeaders headers = listResponse.getHeaders();
        if (headers != null) {
            String continuationLinkHeader = headers.getValue(CONTINUATION_LINK_HEADER_NAME);
            if (!CoreUtils.isNullOrEmpty(continuationLinkHeader)) {
                Matcher matcher = CONTINUATION_LINK_PATTERN.matcher(continuationLinkHeader);
                if (matcher.matches()) {
                    if (matcher.groupCount() == 1) {
                        // Group 1 is the raw URI-Reference between the angle brackets.
                        continuationLink = matcher.group(1);
                    }
                }
            }
        }

        List<T> values = mapperFunction.apply(listResponse.getValue());
        return new PagedResponseBase<String, T>(
            listResponse.getRequest(),
            listResponse.getStatusCode(),
            listResponse.getHeaders(),
            values,
            continuationLink,
            null
        );
    }
}
Maybe update the private constructor to take values so that this can be a one-liner and so the fields can be final #Resolved
/**
 * Instantiates DownloadManifestOptions identifying the manifest by tag.
 *
 * @param tag The tag associated with the manifest.
 * @return The DownloadManifestOptions object.
 * @throws NullPointerException if {@code tag} is null.
 */
public static DownloadManifestOptions fromTag(String tag) {
    if (tag == null) {
        // Fail fast instead of producing options with a null tag that would only
        // surface as an error later, at service-call time.
        throw new NullPointerException("'tag' cannot be null.");
    }
    DownloadManifestOptions options = new DownloadManifestOptions();
    options.tag = tag;
    return options;
}
DownloadManifestOptions options = new DownloadManifestOptions();
/**
 * Creates DownloadManifestOptions identifying the manifest by tag.
 *
 * @param tag The tag associated with the manifest.
 * @return The DownloadManifestOptions object.
 * @throws NullPointerException if {@code tag} is null.
 */
public static DownloadManifestOptions fromTag(String tag) {
    // Validate inline; the digest argument stays null for tag-based downloads.
    return new DownloadManifestOptions(Objects.requireNonNull(tag, "tag can't be null"), null);
}
class with tag. * @param tag The tag associated with the manifest. * @return The DownloadManifestOptions object. */
class with tag. * @param tag The tag associated with the manifest. * @return The DownloadManifestOptions object. */