comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
will update in a separate PR.
public void deleteKey() { }
public void deleteKey() { deleteKeyRunner((keyToDelete) -> { StepVerifier.create(client.createKey(keyToDelete)) .assertNext(keyResponse -> assertKeyEquals(keyToDelete, keyResponse)).verifyComplete(); Poller<DeletedKey, Void> poller = client.beginDeleteKey(keyToDelete.getName()); poller.blockUntil(PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED); DeletedKey deletedKeyResponse = poller.getLastPollResponse().getValue(); assertNotNull(deletedKeyResponse.getDeletedOn()); assertNotNull(deletedKeyResponse.getRecoveryId()); assertNotNull(deletedKeyResponse.getScheduledPurgeDate()); assertEquals(keyToDelete.getName(), deletedKeyResponse.getName()); StepVerifier.create(client.purgeDeletedKeyWithResponse(keyToDelete.getName())) .assertNext(voidResponse -> { assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()); }).verifyComplete(); sleepInRecordMode(15000); }); }
class KeyAsyncClientTest extends KeyClientTestBase { private KeyAsyncClient client; @Override protected void beforeTest() { beforeTestSetup(); if (interceptorManager.isPlaybackMode()) { client = clientSetup(pipeline -> new KeyClientBuilder() .vaultEndpoint(getEndpoint()) .pipeline(pipeline) .buildAsyncClient()); } else { client = clientSetup(pipeline -> new KeyClientBuilder() .pipeline(pipeline) .vaultEndpoint(getEndpoint()) .buildAsyncClient()); } } /** * Tests that a key can be created in the key vault. */ public void setKey() { setKeyRunner((expected) -> StepVerifier.create(client.createKey(expected)) .assertNext(response -> assertKeyEquals(expected, response)) .verifyComplete()); } /** * Tests that we cannot create a key when the key is an empty string. */ public void setKeyEmptyName() { StepVerifier.create(client.createKey("", KeyType.RSA)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)); } /** * Tests that we can create keys when value is not null or an empty string. */ public void setKeyNullType() { setKeyEmptyValueRunner((key) -> { StepVerifier.create(client.createKey(key)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)); }); } /** * Verifies that an exception is thrown when null key object is passed for creation. */ public void setKeyNull() { StepVerifier.create(client.createKey(null)) .verifyError(NullPointerException.class); } /** * Tests that a key is able to be updated when it exists. 
*/ public void updateKey() { updateKeyRunner((original, updated) -> { StepVerifier.create(client.createKey(original)) .assertNext(response -> assertKeyEquals(original, response)) .verifyComplete(); KeyVaultKey keyToUpdate = client.getKey(original.getName()).block(); StepVerifier.create(client.updateKeyProperties(keyToUpdate.getProperties().setExpiresOn(updated.getExpiresOn()))) .assertNext(response -> { assertNotNull(response); assertEquals(original.getName(), response.getName()); }).verifyComplete(); StepVerifier.create(client.getKey(original.getName())) .assertNext(updatedKeyResponse -> assertKeyEquals(updated, updatedKeyResponse)) .verifyComplete(); }); } /** * Tests that a key is not able to be updated when it is disabled. 403 error is expected. */ public void updateDisabledKey() { updateDisabledKeyRunner((original, updated) -> { StepVerifier.create(client.createKey(original)) .assertNext(response -> assertKeyEquals(original, response)) .verifyComplete(); KeyVaultKey keyToUpdate = client.getKey(original.getName()).block(); StepVerifier.create(client.updateKeyProperties(keyToUpdate.getProperties().setExpiresOn(updated.getExpiresOn()))) .assertNext(response -> { assertNotNull(response); assertEquals(original.getName(), response.getName()); }).verifyComplete(); StepVerifier.create(client.getKey(original.getName())) .assertNext(updatedKeyResponse -> assertKeyEquals(updated, updatedKeyResponse)) .verifyComplete(); }); } /** * Tests that an existing key can be retrieved. */ public void getKey() { getKeyRunner((original) -> { client.createKey(original); StepVerifier.create(client.getKey(original.getName())) .assertNext(response -> assertKeyEquals(original, response)) .verifyComplete(); }); } /** * Tests that a specific version of the key can be retrieved. 
*/ public void getKeySpecificVersion() { getKeySpecificVersionRunner((key, keyWithNewVal) -> { final KeyVaultKey keyVersionOne = client.createKey(key).block(); final KeyVaultKey keyVersionTwo = client.createKey(keyWithNewVal).block(); StepVerifier.create(client.getKey(key.getName(), keyVersionOne.getProperties().getVersion())) .assertNext(response -> assertKeyEquals(key, response)) .verifyComplete(); StepVerifier.create(client.getKey(keyWithNewVal.getName(), keyVersionTwo.getProperties().getVersion())) .assertNext(response -> assertKeyEquals(keyWithNewVal, response)) .verifyComplete(); }); } /** * Tests that an attempt to get a non-existing key throws an error. */ public void getKeyNotFound() { StepVerifier.create(client.getKey("non-existing")) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); } /** * Tests that an existing key can be deleted. */ public void deleteKeyNotFound() { } /** * Tests that an attempt to retrieve a non existing deleted key throws an error on a soft-delete enabled vault. */ public void getDeletedKeyNotFound() { StepVerifier.create(client.getDeletedKey("non-existing")) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); } /** * Tests that a deleted key can be recovered on a soft-delete enabled vault. */ public void recoverDeletedKey() { } /** * Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault. */ public void recoverDeletedKeyNotFound() { } /** * Tests that a key can be backed up in the key vault. 
*/ public void backupKey() { backupKeyRunner((keyToBackup) -> { StepVerifier.create(client.createKey(keyToBackup)) .assertNext(keyResponse -> assertKeyEquals(keyToBackup, keyResponse)).verifyComplete(); StepVerifier.create(client.backupKey(keyToBackup.getName())) .assertNext(response -> { assertNotNull(response); assertTrue(response.length > 0); }).verifyComplete(); }); } /** * Tests that an attempt to backup a non existing key throws an error. */ public void backupKeyNotFound() { StepVerifier.create(client.backupKey("non-existing")) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); } /** * Tests that a key can be backed up in the key vault. */ public void restoreKey() { } /** * Tests that an attempt to restore a key from malformed backup bytes throws an error. */ public void restoreKeyFromMalformedBackup() { byte[] keyBackupBytes = "non-existing".getBytes(); StepVerifier.create(client.restoreKeyBackup(keyBackupBytes)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)); } /** * Tests that a deleted key can be retrieved on a soft-delete enabled vault. */ public void getDeletedKey() { } /** * Tests that deleted keys can be listed in the key vault. */ @Override public void listDeletedKeys() { } /** * Tests that key versions can be listed in the key vault. */ @Override public void listKeyVersions() { } /** * Tests that keys can be listed in the key vault. 
*/ public void listKeys() { listKeysRunner((keys) -> { List<KeyProperties> output = new ArrayList<>(); for (CreateKeyOptions key : keys.values()) { client.createKey(key).subscribe(keyResponse -> assertKeyEquals(key, keyResponse)); sleepInRecordMode(1000); } sleepInRecordMode(30000); client.listPropertiesOfKeys().subscribe(output::add); sleepInRecordMode(30000); for (KeyProperties actualKey : output) { if (keys.containsKey(actualKey.getName())) { CreateKeyOptions expectedKey = keys.get(actualKey.getName()); assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn()); assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore()); keys.remove(actualKey.getName()); } } assertEquals(0, keys.size()); }); } private void pollOnKeyDeletion(String keyName) { int pendingPollCount = 0; while (pendingPollCount < 30) { DeletedKey deletedKey = null; try { deletedKey = client.getDeletedKeyWithResponse(keyName).block().getValue(); } catch (ResourceNotFoundException e) { } if (deletedKey == null) { sleepInRecordMode(2000); pendingPollCount += 1; } else { return; } } System.err.printf("Deleted Key %s not found \n", keyName); } private void pollOnKeyPurge(String keyName) { int pendingPollCount = 0; while (pendingPollCount < 10) { DeletedKey deletedKey = null; try { deletedKey = client.getDeletedKeyWithResponse(keyName).block().getValue(); } catch (ResourceNotFoundException e) { } if (deletedKey != null) { sleepInRecordMode(2000); pendingPollCount += 1; } else { return; } } System.err.printf("Deleted Key %s was not purged \n", keyName); } }
class KeyAsyncClientTest extends KeyClientTestBase { private KeyAsyncClient client; @Override protected void beforeTest() { beforeTestSetup(); if (interceptorManager.isPlaybackMode()) { client = clientSetup(pipeline -> new KeyClientBuilder() .vaultUrl(getEndpoint()) .pipeline(pipeline) .buildAsyncClient()); } else { client = clientSetup(pipeline -> new KeyClientBuilder() .pipeline(pipeline) .vaultUrl(getEndpoint()) .buildAsyncClient()); } } /** * Tests that a key can be created in the key vault. */ public void setKey() { setKeyRunner((expected) -> StepVerifier.create(client.createKey(expected)) .assertNext(response -> assertKeyEquals(expected, response)) .verifyComplete()); } /** * Tests that we cannot create a key when the key is an empty string. */ public void setKeyEmptyName() { StepVerifier.create(client.createKey("", KeyType.RSA)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)); } /** * Tests that we can create keys when value is not null or an empty string. */ public void setKeyNullType() { setKeyEmptyValueRunner((key) -> { StepVerifier.create(client.createKey(key)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)); }); } /** * Verifies that an exception is thrown when null key object is passed for creation. */ public void setKeyNull() { StepVerifier.create(client.createKey(null)) .verifyError(NullPointerException.class); } /** * Tests that a key is able to be updated when it exists. 
*/ public void updateKey() { updateKeyRunner((original, updated) -> { StepVerifier.create(client.createKey(original)) .assertNext(response -> assertKeyEquals(original, response)) .verifyComplete(); KeyVaultKey keyToUpdate = client.getKey(original.getName()).block(); StepVerifier.create(client.updateKeyProperties(keyToUpdate.getProperties().setExpiresOn(updated.getExpiresOn()))) .assertNext(response -> { assertNotNull(response); assertEquals(original.getName(), response.getName()); }).verifyComplete(); StepVerifier.create(client.getKey(original.getName())) .assertNext(updatedKeyResponse -> assertKeyEquals(updated, updatedKeyResponse)) .verifyComplete(); }); } /** * Tests that a key is not able to be updated when it is disabled. 403 error is expected. */ public void updateDisabledKey() { updateDisabledKeyRunner((original, updated) -> { StepVerifier.create(client.createKey(original)) .assertNext(response -> assertKeyEquals(original, response)) .verifyComplete(); KeyVaultKey keyToUpdate = client.getKey(original.getName()).block(); StepVerifier.create(client.updateKeyProperties(keyToUpdate.getProperties().setExpiresOn(updated.getExpiresOn()))) .assertNext(response -> { assertNotNull(response); assertEquals(original.getName(), response.getName()); }).verifyComplete(); StepVerifier.create(client.getKey(original.getName())) .assertNext(updatedKeyResponse -> assertKeyEquals(updated, updatedKeyResponse)) .verifyComplete(); }); } /** * Tests that an existing key can be retrieved. */ public void getKey() { getKeyRunner((original) -> { client.createKey(original); StepVerifier.create(client.getKey(original.getName())) .assertNext(response -> assertKeyEquals(original, response)) .verifyComplete(); }); } /** * Tests that a specific version of the key can be retrieved. 
*/ public void getKeySpecificVersion() { getKeySpecificVersionRunner((key, keyWithNewVal) -> { final KeyVaultKey keyVersionOne = client.createKey(key).block(); final KeyVaultKey keyVersionTwo = client.createKey(keyWithNewVal).block(); StepVerifier.create(client.getKey(key.getName(), keyVersionOne.getProperties().getVersion())) .assertNext(response -> assertKeyEquals(key, response)) .verifyComplete(); StepVerifier.create(client.getKey(keyWithNewVal.getName(), keyVersionTwo.getProperties().getVersion())) .assertNext(response -> assertKeyEquals(keyWithNewVal, response)) .verifyComplete(); }); } /** * Tests that an attempt to get a non-existing key throws an error. */ public void getKeyNotFound() { StepVerifier.create(client.getKey("non-existing")) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); } /** * Tests that an existing key can be deleted. */ public void deleteKeyNotFound() { Poller<DeletedKey, Void> deletedKeyPoller = client.beginDeleteKey("non-existing"); while (!deletedKeyPoller.isComplete()) { sleepInRecordMode(1000); } assertEquals(deletedKeyPoller.getLastPollResponse().getStatus(), PollResponse.OperationStatus.FAILED); } /** * Tests that an attempt to retrieve a non existing deleted key throws an error on a soft-delete enabled vault. */ public void getDeletedKeyNotFound() { StepVerifier.create(client.getDeletedKey("non-existing")) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); } /** * Tests that a deleted key can be recovered on a soft-delete enabled vault. 
*/ public void recoverDeletedKey() { recoverDeletedKeyRunner((keyToDeleteAndRecover) -> { StepVerifier.create(client.createKey(keyToDeleteAndRecover)) .assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndRecover, keyResponse)).verifyComplete(); Poller<DeletedKey, Void> poller = client.beginDeleteKey(keyToDeleteAndRecover.getName()); poller.blockUntil(PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED); assertNotNull(poller.getLastPollResponse().getValue()); Poller<KeyVaultKey, Void> recoverPoller = client.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()); recoverPoller.blockUntil(PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED); KeyVaultKey keyResponse = recoverPoller.getLastPollResponse().getValue(); assertEquals(keyToDeleteAndRecover.getName(), keyResponse.getName()); assertEquals(keyToDeleteAndRecover.getNotBefore(), keyResponse.getProperties().getNotBefore()); assertEquals(keyToDeleteAndRecover.getExpiresOn(), keyResponse.getProperties().getExpiresOn()); }); } /** * Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault. */ public void recoverDeletedKeyNotFound() { Poller<KeyVaultKey, Void> poller = client.beginRecoverDeletedKey("non-existing"); while (!poller.isComplete()) { sleepInRecordMode(1000); } assertEquals(poller.getStatus(), PollResponse.OperationStatus.FAILED); } /** * Tests that a key can be backed up in the key vault. */ public void backupKey() { Poller<KeyVaultKey, Void> poller = client.beginRecoverDeletedKey("non-existing"); while (!poller.isComplete()) { sleepInRecordMode(1000); } assertEquals(poller.getStatus(), PollResponse.OperationStatus.FAILED); } /** * Tests that an attempt to backup a non existing key throws an error. 
*/ public void backupKeyNotFound() { StepVerifier.create(client.backupKey("non-existing")) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); } /** * Tests that a key can be backed up in the key vault. */ public void restoreKey() { restoreKeyRunner((keyToBackupAndRestore) -> { StepVerifier.create(client.createKey(keyToBackupAndRestore)) .assertNext(keyResponse -> assertKeyEquals(keyToBackupAndRestore, keyResponse)).verifyComplete(); byte[] backup = client.backupKey(keyToBackupAndRestore.getName()).block(); Poller<DeletedKey, Void> poller = client.beginDeleteKey(keyToBackupAndRestore.getName()); poller.blockUntil(PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED); assertNotNull(poller.getLastPollResponse().getValue()); StepVerifier.create(client.purgeDeletedKeyWithResponse(keyToBackupAndRestore.getName())) .assertNext(voidResponse -> { assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()); }).verifyComplete(); pollOnKeyPurge(keyToBackupAndRestore.getName()); sleepInRecordMode(60000); StepVerifier.create(client.restoreKeyBackup(backup)) .assertNext(response -> { assertEquals(keyToBackupAndRestore.getName(), response.getName()); assertEquals(keyToBackupAndRestore.getNotBefore(), response.getProperties().getNotBefore()); assertEquals(keyToBackupAndRestore.getExpiresOn(), response.getProperties().getExpiresOn()); }).verifyComplete(); }); } /** * Tests that an attempt to restore a key from malformed backup bytes throws an error. */ public void restoreKeyFromMalformedBackup() { byte[] keyBackupBytes = "non-existing".getBytes(); StepVerifier.create(client.restoreKeyBackup(keyBackupBytes)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)); } /** * Tests that a deleted key can be retrieved on a soft-delete enabled vault. 
*/ public void getDeletedKey() { getDeletedKeyRunner((keyToDeleteAndGet) -> { StepVerifier.create(client.createKey(keyToDeleteAndGet)) .assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndGet, keyResponse)).verifyComplete(); Poller<DeletedKey, Void> poller = client.beginDeleteKey(keyToDeleteAndGet.getName()); poller.blockUntil(PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED); assertNotNull(poller.getLastPollResponse().getValue()); StepVerifier.create(client.getDeletedKey(keyToDeleteAndGet.getName())) .assertNext(deletedKeyResponse -> { assertNotNull(deletedKeyResponse.getDeletedOn()); assertNotNull(deletedKeyResponse.getRecoveryId()); assertNotNull(deletedKeyResponse.getScheduledPurgeDate()); assertEquals(keyToDeleteAndGet.getName(), deletedKeyResponse.getName()); }).verifyComplete(); StepVerifier.create(client.purgeDeletedKeyWithResponse(keyToDeleteAndGet.getName())) .assertNext(voidResponse -> { assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()); }).verifyComplete(); pollOnKeyPurge(keyToDeleteAndGet.getName()); sleepInRecordMode(15000); }); } /** * Tests that deleted keys can be listed in the key vault. 
*/ @Override public void listDeletedKeys() { listDeletedKeysRunner((keys) -> { List<DeletedKey> deletedKeys = new ArrayList<>(); for (CreateKeyOptions key : keys.values()) { StepVerifier.create(client.createKey(key)) .assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete(); } sleepInRecordMode(10000); for (CreateKeyOptions key : keys.values()) { Poller<DeletedKey, Void> poller = client.beginDeleteKey(key.getName()); poller.blockUntil(PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED); assertNotNull(poller.getLastPollResponse().getValue()); } sleepInRecordMode(60000); client.listDeletedKeys().subscribe(deletedKeys::add); sleepInRecordMode(30000); for (DeletedKey actualKey : deletedKeys) { if (keys.containsKey(actualKey.getName())) { assertNotNull(actualKey.getDeletedOn()); assertNotNull(actualKey.getRecoveryId()); keys.remove(actualKey.getName()); } } assertEquals(0, keys.size()); for (DeletedKey deletedKey : deletedKeys) { StepVerifier.create(client.purgeDeletedKeyWithResponse(deletedKey.getName())) .assertNext(voidResponse -> { assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()); }).verifyComplete(); pollOnKeyPurge(deletedKey.getName()); } }); } /** * Tests that key versions can be listed in the key vault. 
*/ @Override public void listKeyVersions() { listKeyVersionsRunner((keys) -> { List<KeyProperties> output = new ArrayList<>(); String keyName = null; for (CreateKeyOptions key : keys) { keyName = key.getName(); client.createKey(key).subscribe(keyResponse -> assertKeyEquals(key, keyResponse)); sleepInRecordMode(1000); } sleepInRecordMode(30000); client.listPropertiesOfKeyVersions(keyName).subscribe(output::add); sleepInRecordMode(30000); assertEquals(keys.size(), output.size()); Poller<DeletedKey, Void> poller = client.beginDeleteKey(keyName); poller.blockUntil(PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED); assertNotNull(poller.getLastPollResponse().getValue()); StepVerifier.create(client.purgeDeletedKeyWithResponse(keyName)) .assertNext(voidResponse -> { assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()); }).verifyComplete(); pollOnKeyPurge(keyName); }); } /** * Tests that keys can be listed in the key vault. */ public void listKeys() { listKeysRunner((keys) -> { List<KeyProperties> output = new ArrayList<>(); for (CreateKeyOptions key : keys.values()) { client.createKey(key).subscribe(keyResponse -> assertKeyEquals(key, keyResponse)); sleepInRecordMode(1000); } sleepInRecordMode(30000); client.listPropertiesOfKeys().subscribe(output::add); sleepInRecordMode(30000); for (KeyProperties actualKey : output) { if (keys.containsKey(actualKey.getName())) { CreateKeyOptions expectedKey = keys.get(actualKey.getName()); assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn()); assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore()); keys.remove(actualKey.getName()); } } assertEquals(0, keys.size()); }); } private void pollOnKeyDeletion(String keyName) { int pendingPollCount = 0; while (pendingPollCount < 30) { DeletedKey deletedKey = null; try { deletedKey = client.getDeletedKeyWithResponse(keyName).block().getValue(); } catch (ResourceNotFoundException e) { } if (deletedKey == null) { sleepInRecordMode(2000); 
pendingPollCount += 1; } else { return; } } System.err.printf("Deleted Key %s not found \n", keyName); } private void pollOnKeyPurge(String keyName) { int pendingPollCount = 0; while (pendingPollCount < 10) { DeletedKey deletedKey = null; try { deletedKey = client.getDeletedKeyWithResponse(keyName).block().getValue(); } catch (ResourceNotFoundException e) { } if (deletedKey != null) { sleepInRecordMode(2000); pendingPollCount += 1; } else { return; } } System.err.printf("Deleted Key %s was not purged \n", keyName); } }
Need to pass the exception object.
public SecretClientBuilder vaultUrl(String vaultUrl) { try { this.vaultUrl = new URL(vaultUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new IllegalArgumentException( "The Azure Key Vault endpoint url is malformed.")); } return this; }
throw logger.logExceptionAsError(new IllegalArgumentException(
public SecretClientBuilder vaultUrl(String vaultUrl) { try { this.vaultUrl = new URL(vaultUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new IllegalArgumentException( "The Azure Key Vault url is malformed.", e)); } return this; }
class SecretClientBuilder { private final ClientLogger logger = new ClientLogger(SecretClientBuilder.class); private final List<HttpPipelinePolicy> policies; private TokenCredential credential; private HttpPipeline pipeline; private URL vaultUrl; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private final RetryPolicy retryPolicy; private Configuration configuration; private SecretServiceVersion version; /** * The constructor with defaults. */ public SecretClientBuilder() { retryPolicy = new RetryPolicy(); httpLogOptions = new HttpLogOptions(); policies = new ArrayList<>(); } /** * Creates a {@link SecretClient} based on options set in the builder. * Every time {@code buildClient()} is called, a new instance of {@link SecretClient} is created. * * <p>If {@link SecretClientBuilder * {@link SecretClientBuilder * {@link SecretClientBuilder client}. All other builder settings are ignored. If {@code pipeline} is not set, * then {@link SecretClientBuilder * {@link SecretClientBuilder * client}.}</p> * * @return A SecretClient with the options set from the builder. * @throws IllegalStateException If {@link SecretClientBuilder * {@link SecretClientBuilder */ public SecretClient buildClient() { return new SecretClient(buildAsyncClient()); } /** * Creates a {@link SecretAsyncClient} based on options set in the builder. * Every time {@code buildAsyncClient()} is called, a new instance of {@link SecretAsyncClient} is created. * * <p>If {@link SecretClientBuilder * {@link SecretClientBuilder * {@link SecretClientBuilder client}. All other builder settings are ignored. If {@code pipeline} is not set, * then {@link SecretClientBuilder * {@link SecretClientBuilder * SecretAsyncClient client}.}</p> * * @return A SecretAsyncClient with the options set from the builder. 
* @throws IllegalStateException If {@link SecretClientBuilder * {@link SecretClientBuilder */ public SecretAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; URL buildEndpoint = getBuildEndpoint(buildConfiguration); if (buildEndpoint == null) { throw logger.logExceptionAsError( new IllegalStateException( KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED))); } SecretServiceVersion serviceVersion = version != null ? version : SecretServiceVersion.getLatest(); if (pipeline != null) { return new SecretAsyncClient(vaultUrl, pipeline, serviceVersion); } if (credential == null) { throw logger.logExceptionAsError( new IllegalStateException( KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.CREDENTIAL_REQUIRED))); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(AzureKeyVaultConfiguration.SDK_NAME, AzureKeyVaultConfiguration.SDK_VERSION, buildConfiguration, serviceVersion)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy); policies.add(new KeyVaultCredentialPolicy(credential)); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new SecretAsyncClient(vaultUrl, pipeline, serviceVersion); } /** * Sets the vault endpoint url to send HTTP requests to. * * @param vaultUrl The vault endpoint url is used as destination on Azure to send requests to. * @return the updated {@link SecretClientBuilder} object. * @throws IllegalArgumentException if {@code vaultUrl} is null or it cannot be parsed into a valid URL. */ /** * Sets the credential to use when authenticating HTTP requests. 
* * @param credential The credential to use for authenticating HTTP requests. * @return the updated {@link SecretClientBuilder} object. * @throws NullPointerException if {@code credential} is {@code null}. */ public SecretClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.credential = credential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated {@link SecretClientBuilder} object. */ public SecretClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after * {@link SecretAsyncClient} or {@link SecretClient} required policies. * * @param policy The {@link HttpPipelinePolicy policy} to be added. * @return the updated {@link SecretClientBuilder} object. * @throws NullPointerException if {@code policy} is {@code null}. */ public SecretClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return the updated {@link SecretClientBuilder} object. * @throws NullPointerException If {@code client} is {@code null}. */ public SecretClientBuilder httpClient(HttpClient client) { Objects.requireNonNull(client); this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link SecretClientBuilder * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return the updated {@link SecretClientBuilder} object. 
*/ public SecretClientBuilder pipeline(HttpPipeline pipeline) { Objects.requireNonNull(pipeline); this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated SecretClientBuilder object. */ public SecretClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link SecretServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link SecretServiceVersion} of the service API used when making requests. * @return The updated SecretClientBuilder object. */ public SecretClientBuilder serviceVersion(SecretServiceVersion version) { this.version = version; return this; } private URL getBuildEndpoint(Configuration configuration) { if (vaultUrl != null) { return vaultUrl; } String configEndpoint = configuration.get("AZURE_KEYVAULT_ENDPOINT"); if (ImplUtils.isNullOrEmpty(configEndpoint)) { return null; } try { return new URL(configEndpoint); } catch (MalformedURLException ex) { return null; } } }
class SecretClientBuilder { private final ClientLogger logger = new ClientLogger(SecretClientBuilder.class); private final List<HttpPipelinePolicy> policies; private TokenCredential credential; private HttpPipeline pipeline; private URL vaultUrl; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private final RetryPolicy retryPolicy; private Configuration configuration; private SecretServiceVersion version; /** * The constructor with defaults. */ public SecretClientBuilder() { retryPolicy = new RetryPolicy(); httpLogOptions = new HttpLogOptions(); policies = new ArrayList<>(); } /** * Creates a {@link SecretClient} based on options set in the builder. * Every time {@code buildClient()} is called, a new instance of {@link SecretClient} is created. * * <p>If {@link SecretClientBuilder * {@link SecretClientBuilder * {@link SecretClientBuilder client}. All other builder settings are ignored. If {@code pipeline} is not set, * then {@link SecretClientBuilder * {@link SecretClientBuilder * client}.}</p> * * @return A SecretClient with the options set from the builder. * @throws IllegalStateException If {@link SecretClientBuilder * {@link SecretClientBuilder */ public SecretClient buildClient() { return new SecretClient(buildAsyncClient()); } /** * Creates a {@link SecretAsyncClient} based on options set in the builder. * Every time {@code buildAsyncClient()} is called, a new instance of {@link SecretAsyncClient} is created. * * <p>If {@link SecretClientBuilder * {@link SecretClientBuilder * {@link SecretClientBuilder client}. All other builder settings are ignored. If {@code pipeline} is not set, * then {@link SecretClientBuilder * {@link SecretClientBuilder * SecretAsyncClient client}.}</p> * * @return A SecretAsyncClient with the options set from the builder. 
* @throws IllegalStateException If {@link SecretClientBuilder * {@link SecretClientBuilder */ public SecretAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; URL buildEndpoint = getBuildEndpoint(buildConfiguration); if (buildEndpoint == null) { throw logger.logExceptionAsError( new IllegalStateException( KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED))); } SecretServiceVersion serviceVersion = version != null ? version : SecretServiceVersion.getLatest(); if (pipeline != null) { return new SecretAsyncClient(vaultUrl, pipeline, serviceVersion); } if (credential == null) { throw logger.logExceptionAsError( new IllegalStateException( KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.CREDENTIAL_REQUIRED))); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(AzureKeyVaultConfiguration.SDK_NAME, AzureKeyVaultConfiguration.SDK_VERSION, buildConfiguration, serviceVersion)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy); policies.add(new KeyVaultCredentialPolicy(credential)); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new SecretAsyncClient(vaultUrl, pipeline, serviceVersion); } /** * Sets the vault url to send HTTP requests to. * * @param vaultUrl The vault url is used as destination on Azure to send requests to. * @return the updated {@link SecretClientBuilder} object. * @throws IllegalArgumentException if {@code vaultUrl} is null or it cannot be parsed into a valid URL. */ /** * Sets the credential to use when authenticating HTTP requests. 
* * @param credential The credential to use for authenticating HTTP requests. * @return the updated {@link SecretClientBuilder} object. * @throws NullPointerException if {@code credential} is {@code null}. */ public SecretClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.credential = credential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated {@link SecretClientBuilder} object. */ public SecretClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after * {@link SecretAsyncClient} or {@link SecretClient} required policies. * * @param policy The {@link HttpPipelinePolicy policy} to be added. * @return the updated {@link SecretClientBuilder} object. * @throws NullPointerException if {@code policy} is {@code null}. */ public SecretClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return the updated {@link SecretClientBuilder} object. * @throws NullPointerException If {@code client} is {@code null}. */ public SecretClientBuilder httpClient(HttpClient client) { Objects.requireNonNull(client); this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link SecretClientBuilder * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return the updated {@link SecretClientBuilder} object. 
*/ public SecretClientBuilder pipeline(HttpPipeline pipeline) { Objects.requireNonNull(pipeline); this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated SecretClientBuilder object. */ public SecretClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link SecretServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link SecretServiceVersion} of the service API used when making requests. * @return The updated SecretClientBuilder object. */ public SecretClientBuilder serviceVersion(SecretServiceVersion version) { this.version = version; return this; } private URL getBuildEndpoint(Configuration configuration) { if (vaultUrl != null) { return vaultUrl; } String configEndpoint = configuration.get("AZURE_KEYVAULT_ENDPOINT"); if (ImplUtils.isNullOrEmpty(configEndpoint)) { return null; } try { return new URL(configEndpoint); } catch (MalformedURLException ex) { return null; } } }
updated
/**
 * Sets the vault endpoint url to send HTTP requests to.
 *
 * @param vaultUrl The vault endpoint url used as destination on Azure to send requests to.
 * @return the updated {@link SecretClientBuilder} object.
 * @throws IllegalArgumentException if {@code vaultUrl} cannot be parsed into a valid URL.
 */
public SecretClientBuilder vaultUrl(String vaultUrl) {
    try {
        this.vaultUrl = new URL(vaultUrl);
    } catch (MalformedURLException e) {
        // Preserve the parse failure as the cause so callers can see why the url was
        // rejected; the original rethrow dropped the MalformedURLException entirely.
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "The Azure Key Vault endpoint url is malformed.", e));
    }
    return this;
}
throw logger.logExceptionAsError(new IllegalArgumentException(
/**
 * Sets the vault url that HTTP requests are sent to.
 *
 * @param vaultUrl Destination vault url for requests sent to Azure.
 * @return the updated {@link SecretClientBuilder} object.
 * @throws IllegalArgumentException if {@code vaultUrl} cannot be parsed into a valid URL.
 */
public SecretClientBuilder vaultUrl(String vaultUrl) {
    URL parsedVaultUrl;
    try {
        parsedVaultUrl = new URL(vaultUrl);
    } catch (MalformedURLException malformedUrlException) {
        // Surface the parse failure, keeping the original exception as the cause.
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "The Azure Key Vault url is malformed.", malformedUrlException));
    }
    this.vaultUrl = parsedVaultUrl;
    return this;
}
class SecretClientBuilder { private final ClientLogger logger = new ClientLogger(SecretClientBuilder.class); private final List<HttpPipelinePolicy> policies; private TokenCredential credential; private HttpPipeline pipeline; private URL vaultUrl; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private final RetryPolicy retryPolicy; private Configuration configuration; private SecretServiceVersion version; /** * The constructor with defaults. */ public SecretClientBuilder() { retryPolicy = new RetryPolicy(); httpLogOptions = new HttpLogOptions(); policies = new ArrayList<>(); } /** * Creates a {@link SecretClient} based on options set in the builder. * Every time {@code buildClient()} is called, a new instance of {@link SecretClient} is created. * * <p>If {@link SecretClientBuilder * {@link SecretClientBuilder * {@link SecretClientBuilder client}. All other builder settings are ignored. If {@code pipeline} is not set, * then {@link SecretClientBuilder * {@link SecretClientBuilder * client}.}</p> * * @return A SecretClient with the options set from the builder. * @throws IllegalStateException If {@link SecretClientBuilder * {@link SecretClientBuilder */ public SecretClient buildClient() { return new SecretClient(buildAsyncClient()); } /** * Creates a {@link SecretAsyncClient} based on options set in the builder. * Every time {@code buildAsyncClient()} is called, a new instance of {@link SecretAsyncClient} is created. * * <p>If {@link SecretClientBuilder * {@link SecretClientBuilder * {@link SecretClientBuilder client}. All other builder settings are ignored. If {@code pipeline} is not set, * then {@link SecretClientBuilder * {@link SecretClientBuilder * SecretAsyncClient client}.}</p> * * @return A SecretAsyncClient with the options set from the builder. 
* @throws IllegalStateException If {@link SecretClientBuilder * {@link SecretClientBuilder */ public SecretAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; URL buildEndpoint = getBuildEndpoint(buildConfiguration); if (buildEndpoint == null) { throw logger.logExceptionAsError( new IllegalStateException( KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED))); } SecretServiceVersion serviceVersion = version != null ? version : SecretServiceVersion.getLatest(); if (pipeline != null) { return new SecretAsyncClient(vaultUrl, pipeline, serviceVersion); } if (credential == null) { throw logger.logExceptionAsError( new IllegalStateException( KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.CREDENTIAL_REQUIRED))); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(AzureKeyVaultConfiguration.SDK_NAME, AzureKeyVaultConfiguration.SDK_VERSION, buildConfiguration, serviceVersion)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy); policies.add(new KeyVaultCredentialPolicy(credential)); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new SecretAsyncClient(vaultUrl, pipeline, serviceVersion); } /** * Sets the vault endpoint url to send HTTP requests to. * * @param vaultUrl The vault endpoint url is used as destination on Azure to send requests to. * @return the updated {@link SecretClientBuilder} object. * @throws IllegalArgumentException if {@code vaultUrl} is null or it cannot be parsed into a valid URL. */ /** * Sets the credential to use when authenticating HTTP requests. 
* * @param credential The credential to use for authenticating HTTP requests. * @return the updated {@link SecretClientBuilder} object. * @throws NullPointerException if {@code credential} is {@code null}. */ public SecretClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.credential = credential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated {@link SecretClientBuilder} object. */ public SecretClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after * {@link SecretAsyncClient} or {@link SecretClient} required policies. * * @param policy The {@link HttpPipelinePolicy policy} to be added. * @return the updated {@link SecretClientBuilder} object. * @throws NullPointerException if {@code policy} is {@code null}. */ public SecretClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return the updated {@link SecretClientBuilder} object. * @throws NullPointerException If {@code client} is {@code null}. */ public SecretClientBuilder httpClient(HttpClient client) { Objects.requireNonNull(client); this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link SecretClientBuilder * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return the updated {@link SecretClientBuilder} object. 
*/ public SecretClientBuilder pipeline(HttpPipeline pipeline) { Objects.requireNonNull(pipeline); this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated SecretClientBuilder object. */ public SecretClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link SecretServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link SecretServiceVersion} of the service API used when making requests. * @return The updated SecretClientBuilder object. */ public SecretClientBuilder serviceVersion(SecretServiceVersion version) { this.version = version; return this; } private URL getBuildEndpoint(Configuration configuration) { if (vaultUrl != null) { return vaultUrl; } String configEndpoint = configuration.get("AZURE_KEYVAULT_ENDPOINT"); if (ImplUtils.isNullOrEmpty(configEndpoint)) { return null; } try { return new URL(configEndpoint); } catch (MalformedURLException ex) { return null; } } }
class SecretClientBuilder { private final ClientLogger logger = new ClientLogger(SecretClientBuilder.class); private final List<HttpPipelinePolicy> policies; private TokenCredential credential; private HttpPipeline pipeline; private URL vaultUrl; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private final RetryPolicy retryPolicy; private Configuration configuration; private SecretServiceVersion version; /** * The constructor with defaults. */ public SecretClientBuilder() { retryPolicy = new RetryPolicy(); httpLogOptions = new HttpLogOptions(); policies = new ArrayList<>(); } /** * Creates a {@link SecretClient} based on options set in the builder. * Every time {@code buildClient()} is called, a new instance of {@link SecretClient} is created. * * <p>If {@link SecretClientBuilder * {@link SecretClientBuilder * {@link SecretClientBuilder client}. All other builder settings are ignored. If {@code pipeline} is not set, * then {@link SecretClientBuilder * {@link SecretClientBuilder * client}.}</p> * * @return A SecretClient with the options set from the builder. * @throws IllegalStateException If {@link SecretClientBuilder * {@link SecretClientBuilder */ public SecretClient buildClient() { return new SecretClient(buildAsyncClient()); } /** * Creates a {@link SecretAsyncClient} based on options set in the builder. * Every time {@code buildAsyncClient()} is called, a new instance of {@link SecretAsyncClient} is created. * * <p>If {@link SecretClientBuilder * {@link SecretClientBuilder * {@link SecretClientBuilder client}. All other builder settings are ignored. If {@code pipeline} is not set, * then {@link SecretClientBuilder * {@link SecretClientBuilder * SecretAsyncClient client}.}</p> * * @return A SecretAsyncClient with the options set from the builder. 
* @throws IllegalStateException If {@link SecretClientBuilder * {@link SecretClientBuilder */ public SecretAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; URL buildEndpoint = getBuildEndpoint(buildConfiguration); if (buildEndpoint == null) { throw logger.logExceptionAsError( new IllegalStateException( KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED))); } SecretServiceVersion serviceVersion = version != null ? version : SecretServiceVersion.getLatest(); if (pipeline != null) { return new SecretAsyncClient(vaultUrl, pipeline, serviceVersion); } if (credential == null) { throw logger.logExceptionAsError( new IllegalStateException( KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.CREDENTIAL_REQUIRED))); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(AzureKeyVaultConfiguration.SDK_NAME, AzureKeyVaultConfiguration.SDK_VERSION, buildConfiguration, serviceVersion)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy); policies.add(new KeyVaultCredentialPolicy(credential)); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new SecretAsyncClient(vaultUrl, pipeline, serviceVersion); } /** * Sets the vault url to send HTTP requests to. * * @param vaultUrl The vault url is used as destination on Azure to send requests to. * @return the updated {@link SecretClientBuilder} object. * @throws IllegalArgumentException if {@code vaultUrl} is null or it cannot be parsed into a valid URL. */ /** * Sets the credential to use when authenticating HTTP requests. 
* * @param credential The credential to use for authenticating HTTP requests. * @return the updated {@link SecretClientBuilder} object. * @throws NullPointerException if {@code credential} is {@code null}. */ public SecretClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.credential = credential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated {@link SecretClientBuilder} object. */ public SecretClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after * {@link SecretAsyncClient} or {@link SecretClient} required policies. * * @param policy The {@link HttpPipelinePolicy policy} to be added. * @return the updated {@link SecretClientBuilder} object. * @throws NullPointerException if {@code policy} is {@code null}. */ public SecretClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return the updated {@link SecretClientBuilder} object. * @throws NullPointerException If {@code client} is {@code null}. */ public SecretClientBuilder httpClient(HttpClient client) { Objects.requireNonNull(client); this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link SecretClientBuilder * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return the updated {@link SecretClientBuilder} object. 
*/ public SecretClientBuilder pipeline(HttpPipeline pipeline) { Objects.requireNonNull(pipeline); this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated SecretClientBuilder object. */ public SecretClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link SecretServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link SecretServiceVersion} of the service API used when making requests. * @return The updated SecretClientBuilder object. */ public SecretClientBuilder serviceVersion(SecretServiceVersion version) { this.version = version; return this; } private URL getBuildEndpoint(Configuration configuration) { if (vaultUrl != null) { return vaultUrl; } String configEndpoint = configuration.get("AZURE_KEYVAULT_ENDPOINT"); if (ImplUtils.isNullOrEmpty(configEndpoint)) { return null; } try { return new URL(configEndpoint); } catch (MalformedURLException ex) { return null; } } }
Is this an optimization thing?
/**
 * Builds the canonical resource name for a queue, of the form
 * {@code /queue/{account}/{queueName}}.
 *
 * @param account The storage account name.
 * @param queueName The queue name.
 * @return the canonical name used when signing requests for the queue.
 */
private static String getCanonicalName(String account, String queueName) {
    // Plain concatenation replaces the previous String.join("", new String[]{...})
    // call, which allocated a throwaway array solely to concatenate four fragments.
    return "/queue/" + account + "/" + queueName;
}
return String.join("", new String[] { "/queue/", account, "/", queueName });
/**
 * Computes the canonical queue resource name: {@code /queue/{account}/{queueName}}.
 *
 * @param account The storage account name.
 * @param queueName The queue name.
 * @return the canonical queue name used for request signing.
 */
private static String getCanonicalName(String account, String queueName) {
    StringBuilder canonicalName = new StringBuilder("/queue/");
    canonicalName.append(account).append('/').append(queueName);
    return canonicalName.toString();
}
class level JavaDocs.</p> * @param storageSharedKeyCredentials A {@link StorageSharedKeyCredential}
class level JavaDocs.</p> * * @param storageSharedKeyCredentials A {@link StorageSharedKeyCredential}
```suggestion ? String.format("/file/%s/%s/%s", account, shareName, filePath.replace("\\", "/")) ```
/**
 * Builds the canonical resource name for a file share resource:
 * {@code /file/{account}/{shareName}} with {@code /{filePath}} appended when a
 * file path is present. Backslashes in the path are normalized to forward slashes.
 *
 * @param account The storage account name.
 * @param shareName The share name.
 * @param filePath The file path; may be null or empty for share-level resources.
 * @return the canonical name used when signing requests.
 */
private static String getCanonicalName(String account, String shareName, String filePath) {
    // Bug fix: the condition was inverted — a null/empty filePath previously took
    // the branch that dereferences filePath, throwing NullPointerException.
    return !ImplUtils.isNullOrEmpty(filePath)
        ? String.format("/file/%s/%s/%s", account, shareName, filePath.replace("\\", "/"))
        : String.format("/file/%s/%s", account, shareName);
}
? String.format("/file/%s/%s/%s",account, shareName, filePath.replace("\\", "/"))
/**
 * Computes the canonical file resource name. Share-level resources yield
 * {@code /file/{account}/{shareName}}; a non-empty path appends
 * {@code /{filePath}} with backslashes converted to forward slashes.
 *
 * @param account The storage account name.
 * @param shareName The share name.
 * @param filePath The file path; may be null or empty.
 * @return the canonical name used for request signing.
 */
private static String getCanonicalName(String account, String shareName, String filePath) {
    if (ImplUtils.isNullOrEmpty(filePath)) {
        return String.format("/file/%s/%s", account, shareName);
    }
    return String.format("/file/%s/%s/%s", account, shareName, filePath.replace("\\", "/"));
}
class level JavaDocs for code snippets. * * @param storageSharedKeyCredentials A {@link StorageSharedKeyCredential}
class level JavaDocs for code snippets.</p> * * @param storageSharedKeyCredentials A {@link StorageSharedKeyCredential}
```suggestion : String.format("/file/%s/%s", account, shareName); ```
/**
 * Builds the canonical resource name for a file share resource:
 * {@code /file/{account}/{shareName}} with {@code /{filePath}} appended when a
 * file path is present. Backslashes in the path are normalized to forward slashes.
 *
 * @param account The storage account name.
 * @param shareName The share name.
 * @param filePath The file path; may be null or empty for share-level resources.
 * @return the canonical name used when signing requests.
 */
private static String getCanonicalName(String account, String shareName, String filePath) {
    // Bug fix: the condition was inverted — a null/empty filePath previously took
    // the branch that dereferences filePath, throwing NullPointerException.
    return !ImplUtils.isNullOrEmpty(filePath)
        ? String.format("/file/%s/%s/%s", account, shareName, filePath.replace("\\", "/"))
        : String.format("/file/%s/%s", account, shareName);
}
: String.format("/file/%s/%s",account, shareName);
/**
 * Computes the canonical file resource name, appending the normalized file path
 * (backslashes converted to forward slashes) only when one is supplied.
 *
 * @param account The storage account name.
 * @param shareName The share name.
 * @param filePath The file path; may be null or empty.
 * @return the canonical name used for request signing.
 */
private static String getCanonicalName(String account, String shareName, String filePath) {
    String shareCanonicalName = String.format("/file/%s/%s", account, shareName);
    if (ImplUtils.isNullOrEmpty(filePath)) {
        return shareCanonicalName;
    }
    return shareCanonicalName + "/" + filePath.replace("\\", "/");
}
class level JavaDocs for code snippets. * * @param storageSharedKeyCredentials A {@link StorageSharedKeyCredential}
class level JavaDocs for code snippets.</p> * * @param storageSharedKeyCredentials A {@link StorageSharedKeyCredential}
ACCOUNT_NAME_NAME sounds a little weird, might need some doc.
public EncryptedBlobClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); Map<String, String> connectionStringPieces = Utility.parseConnectionString(connectionString); String accountName = connectionStringPieces.get(Constants.ConnectionStringConstants.ACCOUNT_NAME_NAME); String accountKey = connectionStringPieces.get(Constants.ConnectionStringConstants.ACCOUNT_KEY_NAME); if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { throw logger.logExceptionAsError( new IllegalArgumentException("'connectionString' must contain 'AccountName' and 'AccountKey'.")); } String endpointProtocol = connectionStringPieces.get(Constants.ConnectionStringConstants.DEFAULT_ENDPOINTS_PROTOCOL_NAME); String endpointSuffix = connectionStringPieces.get(Constants.ConnectionStringConstants.ENDPOINT_SUFFIX_NAME); if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) { endpoint(String.format("%s: endpointSuffix.replaceFirst("^\\.", ""))); } this.accountName = accountName; return credential(new StorageSharedKeyCredential(accountName, accountKey)); }
String accountName = connectionStringPieces.get(Constants.ConnectionStringConstants.ACCOUNT_NAME_NAME);
/**
 * Configures this builder from an Azure Storage connection string: sets the blob
 * endpoint, records the account name when present, and installs either a
 * shared-key credential or a SAS token depending on the parsed auth settings.
 *
 * @param connectionString The storage account connection string.
 * @return the updated {@link EncryptedBlobClientBuilder} object.
 * @throws IllegalArgumentException if the connection string does not yield a blob service endpoint.
 */
public EncryptedBlobClientBuilder connectionString(String connectionString) {
    StorageConnectionString parsedConnectionString = StorageConnectionString.create(connectionString, logger);
    StorageEndpoint blobEndpoint = parsedConnectionString.getBlobEndpoint();
    if (blobEndpoint == null || blobEndpoint.getPrimaryUri() == null) {
        throw logger
            .logExceptionAsError(new IllegalArgumentException(
                "connectionString missing required settings to derive blob service endpoint."));
    }
    this.endpoint(blobEndpoint.getPrimaryUri());
    String parsedAccountName = parsedConnectionString.getAccountName();
    if (parsedAccountName != null) {
        this.accountName = parsedAccountName;
    }
    StorageAuthenticationSettings authSettings = parsedConnectionString.getStorageAuthSettings();
    if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
        this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
            authSettings.getAccount().getAccessKey()));
    } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
        this.sasToken(authSettings.getSasToken());
    }
    return this;
}
class EncryptedBlobClientBuilder { private final ClientLogger logger = new ClientLogger(EncryptedBlobClientBuilder.class); private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private SasTokenCredential sasTokenCredential; private HttpClient httpClient; private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); private HttpLogOptions logOptions = new HttpLogOptions(); private RequestRetryOptions retryOptions = new RequestRetryOptions(); private HttpPipeline httpPipeline; private Configuration configuration; private AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; /** * Creates a new instance of the EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder() { } private AzureBlobStorageImpl constructImpl() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (ImplUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); if (httpPipeline != null) { return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(httpPipeline) .version(serviceVersion.getVersion()) .build(); } String userAgentName = BlobCryptographyConfiguration.NAME; String userAgentVersion = BlobCryptographyConfiguration.VERSION; Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver)); policies.add(new UserAgentPolicy(userAgentName, userAgentVersion, userAgentConfiguration, serviceVersion)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SasTokenCredentialPolicy(sasTokenCredential)); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .version(serviceVersion.getVersion()) .build(); } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient} * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. 
*/ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient} * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { return new EncryptedBlobAsyncClient(constructImpl(), snapshot, accountName, keyWrapper, keyWrapAlgorithm); } protected void addOptionalEncryptionPolicy(List<HttpPipelinePolicy> policies) { BlobDecryptionPolicy decryptionPolicy = new BlobDecryptionPolicy(keyWrapper, keyResolver); policies.add(decryptionPolicy); } /** * Sets the encryption key parameters for the client * * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption * key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw logger.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw logger.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. 
*/ public EncryptedBlobClientBuilder sasToken(String sasToken) { this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, "'sasToken' cannot be null.")); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Clears the credential used to authorize the request. * * <p>This is for blobs that are publicly accessible.</p> * * @return the updated EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder setAnonymousAccess() { this.storageSharedKeyCredential = null; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Constructs a {@link StorageSharedKeyCredential} used to authorize requests sent to the service. Additionally, * if the connection string contains `DefaultEndpointsProtocol` and `EndpointSuffix` it will set the {@link * * * @param connectionString Connection string of the storage account. * @return the updated EncryptedBlobClientBuilder * @throws IllegalArgumentException If {@code connectionString} doesn't contain `AccountName` or `AccountKey`. * @throws NullPointerException If {@code connectionString} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = parts.getScheme() + ": this.containerName = parts.getBlobContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); String sasToken = parts.getSasQueryParameters().encode(); if (!ImplUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob. * * @param blobName Name of the blob. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public EncryptedBlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot identifier of the blob. * * @param snapshot Snapshot identifier for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the {@link HttpClient} to use for sending a receiving requests to and from the service. * * @param httpClient HttpClient to use for requests. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent. * * @param pipelinePolicy a pipeline policy * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. */ public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * @param retryOptions The options used to configure retry behavior. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. 
*/ public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from {@link * * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } }
class EncryptedBlobClientBuilder { private final ClientLogger logger = new ClientLogger(EncryptedBlobClientBuilder.class); private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private SasTokenCredential sasTokenCredential; private HttpClient httpClient; private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); private HttpLogOptions logOptions = new HttpLogOptions(); private RequestRetryOptions retryOptions = new RequestRetryOptions(); private HttpPipeline httpPipeline; private Configuration configuration; private AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; /** * Creates a new instance of the EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder() { } private AzureBlobStorageImpl constructImpl() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (ImplUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); if (httpPipeline != null) { return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(httpPipeline) .version(serviceVersion.getVersion()) .build(); } String userAgentName = BlobCryptographyConfiguration.NAME; String userAgentVersion = BlobCryptographyConfiguration.VERSION; Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver)); policies.add(new UserAgentPolicy(userAgentName, userAgentVersion, userAgentConfiguration, serviceVersion)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SasTokenCredentialPolicy(sasTokenCredential)); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .version(serviceVersion.getVersion()) .build(); } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient} * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. 
*/ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient} * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { return new EncryptedBlobAsyncClient(constructImpl(), snapshot, accountName, keyWrapper, keyWrapAlgorithm); } protected void addOptionalEncryptionPolicy(List<HttpPipelinePolicy> policies) { BlobDecryptionPolicy decryptionPolicy = new BlobDecryptionPolicy(keyWrapper, keyResolver); policies.add(decryptionPolicy); } /** * Sets the encryption key parameters for the client * * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption * key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw logger.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw logger.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. 
*/ public EncryptedBlobClientBuilder sasToken(String sasToken) { this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, "'sasToken' cannot be null.")); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Clears the credential used to authorize the request. * * <p>This is for blobs that are publicly accessible.</p> * * @return the updated EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder setAnonymousAccess() { this.storageSharedKeyCredential = null; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated EncryptedBlobClientBuilder * @throws IllegalArgumentException If {@code connectionString} is invalid. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = parts.getScheme() + ": this.containerName = parts.getBlobContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); String sasToken = parts.getSasQueryParameters().encode(); if (!ImplUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob. * * @param blobName Name of the blob. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public EncryptedBlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot identifier of the blob. * * @param snapshot Snapshot identifier for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the {@link HttpClient} to use for sending a receiving requests to and from the service. * * @param httpClient HttpClient to use for requests. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent. * * @param pipelinePolicy a pipeline policy * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. */ public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * @param retryOptions The options used to configure retry behavior. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. 
*/ public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from {@link * * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } }
Why do we use different way of constructing endpoint here compare to blob, file..?
public EncryptedBlobClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); Map<String, String> connectionStringPieces = Utility.parseConnectionString(connectionString); String accountName = connectionStringPieces.get(Constants.ConnectionStringConstants.ACCOUNT_NAME_NAME); String accountKey = connectionStringPieces.get(Constants.ConnectionStringConstants.ACCOUNT_KEY_NAME); if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { throw logger.logExceptionAsError( new IllegalArgumentException("'connectionString' must contain 'AccountName' and 'AccountKey'.")); } String endpointProtocol = connectionStringPieces.get(Constants.ConnectionStringConstants.DEFAULT_ENDPOINTS_PROTOCOL_NAME); String endpointSuffix = connectionStringPieces.get(Constants.ConnectionStringConstants.ENDPOINT_SUFFIX_NAME); if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) { endpoint(String.format("%s: endpointSuffix.replaceFirst("^\\.", ""))); } this.accountName = accountName; return credential(new StorageSharedKeyCredential(accountName, accountKey)); }
String endpointProtocol
public EncryptedBlobClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, logger); StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw logger .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive blob service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; }
class EncryptedBlobClientBuilder { private final ClientLogger logger = new ClientLogger(EncryptedBlobClientBuilder.class); private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private SasTokenCredential sasTokenCredential; private HttpClient httpClient; private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); private HttpLogOptions logOptions = new HttpLogOptions(); private RequestRetryOptions retryOptions = new RequestRetryOptions(); private HttpPipeline httpPipeline; private Configuration configuration; private AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; /** * Creates a new instance of the EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder() { } private AzureBlobStorageImpl constructImpl() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (ImplUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); if (httpPipeline != null) { return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(httpPipeline) .version(serviceVersion.getVersion()) .build(); } String userAgentName = BlobCryptographyConfiguration.NAME; String userAgentVersion = BlobCryptographyConfiguration.VERSION; Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver)); policies.add(new UserAgentPolicy(userAgentName, userAgentVersion, userAgentConfiguration, serviceVersion)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SasTokenCredentialPolicy(sasTokenCredential)); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .version(serviceVersion.getVersion()) .build(); } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient} * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. 
*/ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient} * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { return new EncryptedBlobAsyncClient(constructImpl(), snapshot, accountName, keyWrapper, keyWrapAlgorithm); } protected void addOptionalEncryptionPolicy(List<HttpPipelinePolicy> policies) { BlobDecryptionPolicy decryptionPolicy = new BlobDecryptionPolicy(keyWrapper, keyResolver); policies.add(decryptionPolicy); } /** * Sets the encryption key parameters for the client * * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption * key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw logger.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw logger.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. 
*/ public EncryptedBlobClientBuilder sasToken(String sasToken) { this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, "'sasToken' cannot be null.")); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Clears the credential used to authorize the request. * * <p>This is for blobs that are publicly accessible.</p> * * @return the updated EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder setAnonymousAccess() { this.storageSharedKeyCredential = null; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Constructs a {@link StorageSharedKeyCredential} used to authorize requests sent to the service. Additionally, * if the connection string contains `DefaultEndpointsProtocol` and `EndpointSuffix` it will set the {@link * * * @param connectionString Connection string of the storage account. * @return the updated EncryptedBlobClientBuilder * @throws IllegalArgumentException If {@code connectionString} doesn't contain `AccountName` or `AccountKey`. * @throws NullPointerException If {@code connectionString} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = parts.getScheme() + ": this.containerName = parts.getBlobContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); String sasToken = parts.getSasQueryParameters().encode(); if (!ImplUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob. * * @param blobName Name of the blob. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public EncryptedBlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot identifier of the blob. * * @param snapshot Snapshot identifier for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the {@link HttpClient} to use for sending a receiving requests to and from the service. * * @param httpClient HttpClient to use for requests. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent. * * @param pipelinePolicy a pipeline policy * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. */ public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * @param retryOptions The options used to configure retry behavior. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. 
*/ public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from {@link * * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } }
class EncryptedBlobClientBuilder { private final ClientLogger logger = new ClientLogger(EncryptedBlobClientBuilder.class); private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private SasTokenCredential sasTokenCredential; private HttpClient httpClient; private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); private HttpLogOptions logOptions = new HttpLogOptions(); private RequestRetryOptions retryOptions = new RequestRetryOptions(); private HttpPipeline httpPipeline; private Configuration configuration; private AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; /** * Creates a new instance of the EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder() { } private AzureBlobStorageImpl constructImpl() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (ImplUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); if (httpPipeline != null) { return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(httpPipeline) .version(serviceVersion.getVersion()) .build(); } String userAgentName = BlobCryptographyConfiguration.NAME; String userAgentVersion = BlobCryptographyConfiguration.VERSION; Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver)); policies.add(new UserAgentPolicy(userAgentName, userAgentVersion, userAgentConfiguration, serviceVersion)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SasTokenCredentialPolicy(sasTokenCredential)); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .version(serviceVersion.getVersion()) .build(); } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient} * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. 
*/ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient} * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { return new EncryptedBlobAsyncClient(constructImpl(), snapshot, accountName, keyWrapper, keyWrapAlgorithm); } protected void addOptionalEncryptionPolicy(List<HttpPipelinePolicy> policies) { BlobDecryptionPolicy decryptionPolicy = new BlobDecryptionPolicy(keyWrapper, keyResolver); policies.add(decryptionPolicy); } /** * Sets the encryption key parameters for the client * * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption * key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw logger.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw logger.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. 
*/ public EncryptedBlobClientBuilder sasToken(String sasToken) { this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, "'sasToken' cannot be null.")); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Clears the credential used to authorize the request. * * <p>This is for blobs that are publicly accessible.</p> * * @return the updated EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder setAnonymousAccess() { this.storageSharedKeyCredential = null; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated EncryptedBlobClientBuilder * @throws IllegalArgumentException If {@code connectionString} is invalid. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = parts.getScheme() + ": this.containerName = parts.getBlobContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); String sasToken = parts.getSasQueryParameters().encode(); if (!ImplUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob. * * @param blobName Name of the blob. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public EncryptedBlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot identifier of the blob. * * @param snapshot Snapshot identifier for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the {@link HttpClient} to use for sending a receiving requests to and from the service. * * @param httpClient HttpClient to use for requests. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent. * * @param pipelinePolicy a pipeline policy * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. */ public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * @param retryOptions The options used to configure retry behavior. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. 
*/ public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from {@link * * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } }
If the account name and key are both non-null, we enter this block. Do we still need to also check that the SAS signature is absent?
/**
 * Derives the authentication settings from parsed connection-string settings.
 *
 * <p>Precedence: account name + key applies only when no SAS signature is present; otherwise a SAS token applies
 * when the signature is present without an account key; if neither combination is complete, an empty (no-auth)
 * settings object is returned.</p>
 *
 * @param settings the connection settings.
 * @return the StorageAuthenticationSettings.
 */
public static StorageAuthenticationSettings fromConnectionSettings(final ConnectionSettings settings) {
    final String accountName = settings.getSettingValue(Constants.ConnectionStringConstants.ACCOUNT_NAME_NAME);
    final String accountKey = settings.getSettingValue(Constants.ConnectionStringConstants.ACCOUNT_KEY_NAME);
    final String sasSignature =
        settings.getSettingValue(Constants.ConnectionStringConstants.SHARED_ACCESS_SIGNATURE_NAME);

    // Shared-key auth is used only when the connection string carries no SAS signature alongside it.
    if (accountName != null && accountKey != null && sasSignature == null) {
        return new StorageAuthenticationSettings(new Account(accountName, accountKey));
    }
    if (accountKey == null && sasSignature != null) {
        return new StorageAuthenticationSettings(sasSignature);
    }
    // No usable auth information: anonymous access.
    return new StorageAuthenticationSettings();
}
if (accountName != null && accountKey != null && sasSignature == null) {
/**
 * Derives the authentication settings from parsed connection-string settings.
 *
 * <p>Precedence: account name + key applies only when no SAS signature is present; otherwise a SAS token applies
 * when the signature is present without an account key; if neither combination is complete, an empty (no-auth)
 * settings object is returned.</p>
 *
 * @param settings the connection settings.
 * @return the StorageAuthenticationSettings.
 */
public static StorageAuthenticationSettings fromConnectionSettings(final ConnectionSettings settings) {
    // NOTE(review): sibling code reads Constants.ConnectionStringConstants.ACCOUNT_NAME_NAME for this value —
    // confirm which constant name this version of Constants actually declares.
    final String accountName = settings.getSettingValue(Constants.ConnectionStringConstants.ACCOUNT_NAME);
    final String accountKey = settings.getSettingValue(Constants.ConnectionStringConstants.ACCOUNT_KEY_NAME);
    final String sasSignature =
        settings.getSettingValue(Constants.ConnectionStringConstants.SHARED_ACCESS_SIGNATURE_NAME);

    // Shared-key auth is used only when the connection string carries no SAS signature alongside it.
    if (accountName != null && accountKey != null && sasSignature == null) {
        return new StorageAuthenticationSettings(new Account(accountName, accountKey));
    }
    if (accountKey == null && sasSignature != null) {
        return new StorageAuthenticationSettings(sasSignature);
    }
    // No usable auth information: anonymous access.
    return new StorageAuthenticationSettings();
}
class StorageAuthenticationSettings { private final Type type; private final String sasToken; private final Account account; /** * @return the settings type (None, Account Name and Key, Sas token) */ public Type getType() { return this.type; } /** * @return the sas token */ public String getSasToken() { return this.sasToken; } /** * @return the account instance containing account name and key */ public Account getAccount() { return this.account; } /** * Creates {@link StorageAuthenticationSettings} from the given connection settings. * * @param settings the connection settings. * @return the StorageAuthenticationSettings. */ /** * @return get a {@link StorageAuthenticationSettings} for emulator */ public static StorageAuthenticationSettings forEmulator() { return new StorageAuthenticationSettings(new Account(Constants.ConnectionStringConstants.EMULATOR_ACCOUNT_NAME, Constants.ConnectionStringConstants.EMULATOR_ACCOUNT_KEY)); } /** * Creates default {@link StorageAuthenticationSettings} indicating absence of authentication * setting. */ private StorageAuthenticationSettings() { this.type = Type.NONE; this.account = null; this.sasToken = null; } /** * Creates {@link StorageAuthenticationSettings} indicating Sas token based authentication * settings. * * @param sasToken the sas token */ private StorageAuthenticationSettings(String sasToken) { this.type = Type.SAS_TOKEN; this.sasToken = Objects.requireNonNull(sasToken); this.account = null; } /** * Creates {@link StorageAuthenticationSettings} indicating account name and key based * authentication settings. * * @param account the account instance holding account name and key */ private StorageAuthenticationSettings(Account account) { this.type = Type.ACCOUNT_NAME_KEY; this.account = Objects.requireNonNull(account); this.sasToken = null; } /** * Authentication settings type. */ public enum Type { /** * No auth. */ NONE(), /** * Auth based on storage account name and key. 
*/ ACCOUNT_NAME_KEY(), /** * Auth based on SAS token. */ SAS_TOKEN(), } /** * Type to hold storage account name and access key. */ public static final class Account { private String name; private String accessKey; /** * Creates Account. * * @param name the storage account name * @param accessKey the storage access key */ private Account(String name, String accessKey) { this.name = Objects.requireNonNull(name); this.accessKey = Objects.requireNonNull(accessKey); } /** * @return the storage account name */ public String getName() { return this.name; } /** * @return the storage account access key */ public String getAccessKey() { return this.accessKey; } } }
class StorageAuthenticationSettings { private final Type type; private final String sasToken; private final Account account; /** * @return the settings type (None, Account Name and Key, Sas token) */ public Type getType() { return this.type; } /** * @return the sas token */ public String getSasToken() { return this.sasToken; } /** * @return the account instance containing account name and key */ public Account getAccount() { return this.account; } /** * Creates {@link StorageAuthenticationSettings} from the given connection settings. * * @param settings the connection settings. * @return the StorageAuthenticationSettings. */ /** * @return get a {@link StorageAuthenticationSettings} for emulator */ public static StorageAuthenticationSettings forEmulator() { return new StorageAuthenticationSettings(new Account(Constants.ConnectionStringConstants.EMULATOR_ACCOUNT_NAME, Constants.ConnectionStringConstants.EMULATOR_ACCOUNT_KEY)); } /** * Creates default {@link StorageAuthenticationSettings} indicating absence of authentication * setting. */ private StorageAuthenticationSettings() { this.type = Type.NONE; this.account = null; this.sasToken = null; } /** * Creates {@link StorageAuthenticationSettings} indicating Sas token based authentication * settings. * * @param sasToken the sas token */ private StorageAuthenticationSettings(String sasToken) { this.type = Type.SAS_TOKEN; this.sasToken = Objects.requireNonNull(sasToken); this.account = null; } /** * Creates {@link StorageAuthenticationSettings} indicating account name and key based * authentication settings. * * @param account the account instance holding account name and key */ private StorageAuthenticationSettings(Account account) { this.type = Type.ACCOUNT_NAME_KEY; this.account = Objects.requireNonNull(account); this.sasToken = null; } /** * Authentication settings type. */ public enum Type { /** * No auth. */ NONE(), /** * Auth based on storage account name and key. 
*/ ACCOUNT_NAME_KEY(), /** * Auth based on SAS token. */ SAS_TOKEN(), } /** * Type to hold storage account name and access key. */ public static final class Account { private String name; private String accessKey; /** * Creates Account. * * @param name the storage account name * @param accessKey the storage access key */ private Account(String name, String accessKey) { this.name = Objects.requireNonNull(name); this.accessKey = Objects.requireNonNull(accessKey); } /** * @return the storage account name */ public String getName() { return this.name; } /** * @return the storage account access key */ public String getAccessKey() { return this.accessKey; } } }
yes, I'm about to update the PR to use `StorageConnectionString` type here as well that also removes parseConnectionString from Utility.
public EncryptedBlobClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); Map<String, String> connectionStringPieces = Utility.parseConnectionString(connectionString); String accountName = connectionStringPieces.get(Constants.ConnectionStringConstants.ACCOUNT_NAME_NAME); String accountKey = connectionStringPieces.get(Constants.ConnectionStringConstants.ACCOUNT_KEY_NAME); if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { throw logger.logExceptionAsError( new IllegalArgumentException("'connectionString' must contain 'AccountName' and 'AccountKey'.")); } String endpointProtocol = connectionStringPieces.get(Constants.ConnectionStringConstants.DEFAULT_ENDPOINTS_PROTOCOL_NAME); String endpointSuffix = connectionStringPieces.get(Constants.ConnectionStringConstants.ENDPOINT_SUFFIX_NAME); if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) { endpoint(String.format("%s: endpointSuffix.replaceFirst("^\\.", ""))); } this.accountName = accountName; return credential(new StorageSharedKeyCredential(accountName, accountKey)); }
String endpointProtocol
public EncryptedBlobClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, logger); StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw logger .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive blob service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; }
class EncryptedBlobClientBuilder { private final ClientLogger logger = new ClientLogger(EncryptedBlobClientBuilder.class); private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private SasTokenCredential sasTokenCredential; private HttpClient httpClient; private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); private HttpLogOptions logOptions = new HttpLogOptions(); private RequestRetryOptions retryOptions = new RequestRetryOptions(); private HttpPipeline httpPipeline; private Configuration configuration; private AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; /** * Creates a new instance of the EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder() { } private AzureBlobStorageImpl constructImpl() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (ImplUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); if (httpPipeline != null) { return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(httpPipeline) .version(serviceVersion.getVersion()) .build(); } String userAgentName = BlobCryptographyConfiguration.NAME; String userAgentVersion = BlobCryptographyConfiguration.VERSION; Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver)); policies.add(new UserAgentPolicy(userAgentName, userAgentVersion, userAgentConfiguration, serviceVersion)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SasTokenCredentialPolicy(sasTokenCredential)); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .version(serviceVersion.getVersion()) .build(); } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient} * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. 
*/ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient} * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { return new EncryptedBlobAsyncClient(constructImpl(), snapshot, accountName, keyWrapper, keyWrapAlgorithm); } protected void addOptionalEncryptionPolicy(List<HttpPipelinePolicy> policies) { BlobDecryptionPolicy decryptionPolicy = new BlobDecryptionPolicy(keyWrapper, keyResolver); policies.add(decryptionPolicy); } /** * Sets the encryption key parameters for the client * * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption * key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw logger.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw logger.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. 
*/ public EncryptedBlobClientBuilder sasToken(String sasToken) { this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, "'sasToken' cannot be null.")); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Clears the credential used to authorize the request. * * <p>This is for blobs that are publicly accessible.</p> * * @return the updated EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder setAnonymousAccess() { this.storageSharedKeyCredential = null; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Constructs a {@link StorageSharedKeyCredential} used to authorize requests sent to the service. Additionally, * if the connection string contains `DefaultEndpointsProtocol` and `EndpointSuffix` it will set the {@link * * * @param connectionString Connection string of the storage account. * @return the updated EncryptedBlobClientBuilder * @throws IllegalArgumentException If {@code connectionString} doesn't contain `AccountName` or `AccountKey`. * @throws NullPointerException If {@code connectionString} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = parts.getScheme() + ": this.containerName = parts.getBlobContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); String sasToken = parts.getSasQueryParameters().encode(); if (!ImplUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob. * * @param blobName Name of the blob. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public EncryptedBlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot identifier of the blob. * * @param snapshot Snapshot identifier for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the {@link HttpClient} to use for sending a receiving requests to and from the service. * * @param httpClient HttpClient to use for requests. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent. * * @param pipelinePolicy a pipeline policy * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. */ public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * @param retryOptions The options used to configure retry behavior. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. 
*/ public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from {@link * * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } }
class EncryptedBlobClientBuilder { private final ClientLogger logger = new ClientLogger(EncryptedBlobClientBuilder.class); private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private SasTokenCredential sasTokenCredential; private HttpClient httpClient; private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); private HttpLogOptions logOptions = new HttpLogOptions(); private RequestRetryOptions retryOptions = new RequestRetryOptions(); private HttpPipeline httpPipeline; private Configuration configuration; private AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; /** * Creates a new instance of the EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder() { } private AzureBlobStorageImpl constructImpl() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (ImplUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); if (httpPipeline != null) { return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(httpPipeline) .version(serviceVersion.getVersion()) .build(); } String userAgentName = BlobCryptographyConfiguration.NAME; String userAgentVersion = BlobCryptographyConfiguration.VERSION; Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver)); policies.add(new UserAgentPolicy(userAgentName, userAgentVersion, userAgentConfiguration, serviceVersion)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SasTokenCredentialPolicy(sasTokenCredential)); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .version(serviceVersion.getVersion()) .build(); } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient} * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. 
*/ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient} * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { return new EncryptedBlobAsyncClient(constructImpl(), snapshot, accountName, keyWrapper, keyWrapAlgorithm); } protected void addOptionalEncryptionPolicy(List<HttpPipelinePolicy> policies) { BlobDecryptionPolicy decryptionPolicy = new BlobDecryptionPolicy(keyWrapper, keyResolver); policies.add(decryptionPolicy); } /** * Sets the encryption key parameters for the client * * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption * key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw logger.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw logger.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. 
*/ public EncryptedBlobClientBuilder sasToken(String sasToken) { this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, "'sasToken' cannot be null.")); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Clears the credential used to authorize the request. * * <p>This is for blobs that are publicly accessible.</p> * * @return the updated EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder setAnonymousAccess() { this.storageSharedKeyCredential = null; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated EncryptedBlobClientBuilder * @throws IllegalArgumentException If {@code connectionString} is invalid. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = parts.getScheme() + ": this.containerName = parts.getBlobContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); String sasToken = parts.getSasQueryParameters().encode(); if (!ImplUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob. * * @param blobName Name of the blob. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public EncryptedBlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot identifier of the blob. * * @param snapshot Snapshot identifier for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the {@link HttpClient} to use for sending a receiving requests to and from the service. * * @param httpClient HttpClient to use for requests. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent. * * @param pipelinePolicy a pipeline policy * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. */ public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * @param retryOptions The options used to configure retry behavior. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. 
*/ public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from {@link * * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } }
Actually, per the validation rules, {account name : key} and {SAS} are mutually exclusive; we already perform that validation on the connection string, so here it is just reinforced for readability.
/**
 * Creates {@link StorageAuthenticationSettings} from the given connection settings.
 * <p>
 * Resolves, in order: account-name-and-key authentication, then SAS-token
 * authentication, falling back to "no auth" when neither is fully present.
 *
 * @param settings the parsed connection settings.
 * @return the StorageAuthenticationSettings derived from {@code settings}.
 */
public static StorageAuthenticationSettings fromConnectionSettings(final ConnectionSettings settings) {
    final String accountName = settings.getSettingValue(Constants.ConnectionStringConstants.ACCOUNT_NAME);
    final String accountKey = settings.getSettingValue(Constants.ConnectionStringConstants.ACCOUNT_KEY_NAME);
    final String sasSignature =
        settings.getSettingValue(Constants.ConnectionStringConstants.SHARED_ACCESS_SIGNATURE_NAME);
    // Account name+key and SAS are mutually exclusive; connection-string parsing already
    // validates this upstream, so the extra guards below only reinforce it for readability.
    if (accountName != null && accountKey != null && sasSignature == null) {
        return new StorageAuthenticationSettings(new Account(accountName, accountKey));
    }
    if (accountKey == null && sasSignature != null) {
        return new StorageAuthenticationSettings(sasSignature);
    }
    // Neither a complete name/key pair nor a SAS token: no authentication configured.
    return new StorageAuthenticationSettings();
}
if (accountName != null && accountKey != null && sasSignature == null) {
/**
 * Creates {@link StorageAuthenticationSettings} from the given connection settings.
 * <p>
 * Resolves, in order: account-name-and-key authentication, then SAS-token
 * authentication, falling back to "no auth" when neither is fully present.
 *
 * @param settings the parsed connection settings.
 * @return the StorageAuthenticationSettings derived from {@code settings}.
 */
public static StorageAuthenticationSettings fromConnectionSettings(final ConnectionSettings settings) {
    final String accountName = settings.getSettingValue(Constants.ConnectionStringConstants.ACCOUNT_NAME);
    final String accountKey = settings.getSettingValue(Constants.ConnectionStringConstants.ACCOUNT_KEY_NAME);
    final String sasSignature = settings.getSettingValue(Constants.ConnectionStringConstants.SHARED_ACCESS_SIGNATURE_NAME);
    // Account name+key and SAS are mutually exclusive; connection-string parsing already
    // validates this upstream, so the guards below only reinforce it for readability.
    if (accountName != null && accountKey != null && sasSignature == null) {
        return new StorageAuthenticationSettings(new Account(accountName, accountKey));
    }
    if (accountKey == null && sasSignature != null) {
        return new StorageAuthenticationSettings(sasSignature);
    }
    // Neither a complete name/key pair nor a SAS token: no authentication configured.
    return new StorageAuthenticationSettings();
}
class StorageAuthenticationSettings { private final Type type; private final String sasToken; private final Account account; /** * @return the settings type (None, Account Name and Key, Sas token) */ public Type getType() { return this.type; } /** * @return the sas token */ public String getSasToken() { return this.sasToken; } /** * @return the account instance containing account name and key */ public Account getAccount() { return this.account; } /** * Creates {@link StorageAuthenticationSettings} from the given connection settings. * * @param settings the connection settings. * @return the StorageAuthenticationSettings. */ /** * @return get a {@link StorageAuthenticationSettings} for emulator */ public static StorageAuthenticationSettings forEmulator() { return new StorageAuthenticationSettings(new Account(Constants.ConnectionStringConstants.EMULATOR_ACCOUNT_NAME, Constants.ConnectionStringConstants.EMULATOR_ACCOUNT_KEY)); } /** * Creates default {@link StorageAuthenticationSettings} indicating absence of authentication * setting. */ private StorageAuthenticationSettings() { this.type = Type.NONE; this.account = null; this.sasToken = null; } /** * Creates {@link StorageAuthenticationSettings} indicating Sas token based authentication * settings. * * @param sasToken the sas token */ private StorageAuthenticationSettings(String sasToken) { this.type = Type.SAS_TOKEN; this.sasToken = Objects.requireNonNull(sasToken); this.account = null; } /** * Creates {@link StorageAuthenticationSettings} indicating account name and key based * authentication settings. * * @param account the account instance holding account name and key */ private StorageAuthenticationSettings(Account account) { this.type = Type.ACCOUNT_NAME_KEY; this.account = Objects.requireNonNull(account); this.sasToken = null; } /** * Authentication settings type. */ public enum Type { /** * No auth. */ NONE(), /** * Auth based on storage account name and key. 
*/ ACCOUNT_NAME_KEY(), /** * Auth based on SAS token. */ SAS_TOKEN(), } /** * Type to hold storage account name and access key. */ public static final class Account { private String name; private String accessKey; /** * Creates Account. * * @param name the storage account name * @param accessKey the storage access key */ private Account(String name, String accessKey) { this.name = Objects.requireNonNull(name); this.accessKey = Objects.requireNonNull(accessKey); } /** * @return the storage account name */ public String getName() { return this.name; } /** * @return the storage account access key */ public String getAccessKey() { return this.accessKey; } } }
class StorageAuthenticationSettings { private final Type type; private final String sasToken; private final Account account; /** * @return the settings type (None, Account Name and Key, Sas token) */ public Type getType() { return this.type; } /** * @return the sas token */ public String getSasToken() { return this.sasToken; } /** * @return the account instance containing account name and key */ public Account getAccount() { return this.account; } /** * Creates {@link StorageAuthenticationSettings} from the given connection settings. * * @param settings the connection settings. * @return the StorageAuthenticationSettings. */ /** * @return get a {@link StorageAuthenticationSettings} for emulator */ public static StorageAuthenticationSettings forEmulator() { return new StorageAuthenticationSettings(new Account(Constants.ConnectionStringConstants.EMULATOR_ACCOUNT_NAME, Constants.ConnectionStringConstants.EMULATOR_ACCOUNT_KEY)); } /** * Creates default {@link StorageAuthenticationSettings} indicating absence of authentication * setting. */ private StorageAuthenticationSettings() { this.type = Type.NONE; this.account = null; this.sasToken = null; } /** * Creates {@link StorageAuthenticationSettings} indicating Sas token based authentication * settings. * * @param sasToken the sas token */ private StorageAuthenticationSettings(String sasToken) { this.type = Type.SAS_TOKEN; this.sasToken = Objects.requireNonNull(sasToken); this.account = null; } /** * Creates {@link StorageAuthenticationSettings} indicating account name and key based * authentication settings. * * @param account the account instance holding account name and key */ private StorageAuthenticationSettings(Account account) { this.type = Type.ACCOUNT_NAME_KEY; this.account = Objects.requireNonNull(account); this.sasToken = null; } /** * Authentication settings type. */ public enum Type { /** * No auth. */ NONE(), /** * Auth based on storage account name and key. 
*/ ACCOUNT_NAME_KEY(), /** * Auth based on SAS token. */ SAS_TOKEN(), } /** * Type to hold storage account name and access key. */ public static final class Account { private String name; private String accessKey; /** * Creates Account. * * @param name the storage account name * @param accessKey the storage access key */ private Account(String name, String accessKey) { this.name = Objects.requireNonNull(name); this.accessKey = Objects.requireNonNull(accessKey); } /** * @return the storage account name */ public String getName() { return this.name; } /** * @return the storage account access key */ public String getAccessKey() { return this.accessKey; } } }
Agreed — I will rename it to `ACCOUNT_NAME`.
/**
 * Constructs a {@link StorageSharedKeyCredential} from the given connection string and,
 * when the string also carries `DefaultEndpointsProtocol` and `EndpointSuffix`, derives
 * and sets the service endpoint.
 * <p>
 * NOTE(review): this older variant only supports AccountName/AccountKey connection
 * strings; a SAS-only connection string is rejected by the null/empty check below.
 *
 * @param connectionString Connection string of the storage account.
 * @return the updated EncryptedBlobClientBuilder
 * @throws IllegalArgumentException If {@code connectionString} lacks 'AccountName' or 'AccountKey'.
 * @throws NullPointerException If {@code connectionString} is {@code null}.
 */
public EncryptedBlobClientBuilder connectionString(String connectionString) {
    Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
    Map<String, String> connectionStringPieces = Utility.parseConnectionString(connectionString);
    String accountName = connectionStringPieces.get(Constants.ConnectionStringConstants.ACCOUNT_NAME_NAME);
    String accountKey = connectionStringPieces.get(Constants.ConnectionStringConstants.ACCOUNT_KEY_NAME);
    if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("'connectionString' must contain 'AccountName' and 'AccountKey'."));
    }
    String endpointProtocol = connectionStringPieces.get(Constants.ConnectionStringConstants.DEFAULT_ENDPOINTS_PROTOCOL_NAME);
    String endpointSuffix = connectionStringPieces.get(Constants.ConnectionStringConstants.ENDPOINT_SUFFIX_NAME);
    if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) {
        // NOTE(review): the format string below is truncated in this copy (it reads "%s: ...");
        // presumably it originally built "<protocol>://<accountName>.blob.<endpointSuffix>" --
        // confirm against the upstream source before relying on this statement.
        endpoint(String.format("%s: endpointSuffix.replaceFirst("^\\.", "")));
    }
    this.accountName = accountName;
    // Shared-key credential replaces any previously configured token/SAS credential.
    return credential(new StorageSharedKeyCredential(accountName, accountKey));
}
String accountName = connectionStringPieces.get(Constants.ConnectionStringConstants.ACCOUNT_NAME_NAME);
/**
 * Configures the builder from an Azure Storage connection string: derives the blob
 * service endpoint, records the account name, and installs whichever credential
 * (shared key or SAS token) the connection string carries.
 *
 * @param connectionString Connection string of the storage account.
 * @return the updated EncryptedBlobClientBuilder
 * @throws IllegalArgumentException If no blob endpoint can be derived from
 * {@code connectionString}.
 */
public EncryptedBlobClientBuilder connectionString(String connectionString) {
    StorageConnectionString parsed = StorageConnectionString.create(connectionString, logger);
    StorageEndpoint blobEndpoint = parsed.getBlobEndpoint();
    if (blobEndpoint == null || blobEndpoint.getPrimaryUri() == null) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "connectionString missing required settings to derive blob service endpoint."));
    }
    this.endpoint(blobEndpoint.getPrimaryUri());
    String parsedAccountName = parsed.getAccountName();
    if (parsedAccountName != null) {
        this.accountName = parsedAccountName;
    }
    StorageAuthenticationSettings auth = parsed.getStorageAuthSettings();
    switch (auth.getType()) {
        case ACCOUNT_NAME_KEY:
            this.credential(new StorageSharedKeyCredential(auth.getAccount().getName(),
                auth.getAccount().getAccessKey()));
            break;
        case SAS_TOKEN:
            this.sasToken(auth.getSasToken());
            break;
        default:
            // Connection string carried no credential; leave auth configuration untouched.
            break;
    }
    return this;
}
class EncryptedBlobClientBuilder { private final ClientLogger logger = new ClientLogger(EncryptedBlobClientBuilder.class); private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private SasTokenCredential sasTokenCredential; private HttpClient httpClient; private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); private HttpLogOptions logOptions = new HttpLogOptions(); private RequestRetryOptions retryOptions = new RequestRetryOptions(); private HttpPipeline httpPipeline; private Configuration configuration; private AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; /** * Creates a new instance of the EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder() { } private AzureBlobStorageImpl constructImpl() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (ImplUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); if (httpPipeline != null) { return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(httpPipeline) .version(serviceVersion.getVersion()) .build(); } String userAgentName = BlobCryptographyConfiguration.NAME; String userAgentVersion = BlobCryptographyConfiguration.VERSION; Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver)); policies.add(new UserAgentPolicy(userAgentName, userAgentVersion, userAgentConfiguration, serviceVersion)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SasTokenCredentialPolicy(sasTokenCredential)); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .version(serviceVersion.getVersion()) .build(); } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient} * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. 
*/ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient} * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { return new EncryptedBlobAsyncClient(constructImpl(), snapshot, accountName, keyWrapper, keyWrapAlgorithm); } protected void addOptionalEncryptionPolicy(List<HttpPipelinePolicy> policies) { BlobDecryptionPolicy decryptionPolicy = new BlobDecryptionPolicy(keyWrapper, keyResolver); policies.add(decryptionPolicy); } /** * Sets the encryption key parameters for the client * * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption * key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw logger.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw logger.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. 
*/ public EncryptedBlobClientBuilder sasToken(String sasToken) { this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, "'sasToken' cannot be null.")); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Clears the credential used to authorize the request. * * <p>This is for blobs that are publicly accessible.</p> * * @return the updated EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder setAnonymousAccess() { this.storageSharedKeyCredential = null; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Constructs a {@link StorageSharedKeyCredential} used to authorize requests sent to the service. Additionally, * if the connection string contains `DefaultEndpointsProtocol` and `EndpointSuffix` it will set the {@link * * * @param connectionString Connection string of the storage account. * @return the updated EncryptedBlobClientBuilder * @throws IllegalArgumentException If {@code connectionString} doesn't contain `AccountName` or `AccountKey`. * @throws NullPointerException If {@code connectionString} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = parts.getScheme() + ": this.containerName = parts.getBlobContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); String sasToken = parts.getSasQueryParameters().encode(); if (!ImplUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob. * * @param blobName Name of the blob. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public EncryptedBlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot identifier of the blob. * * @param snapshot Snapshot identifier for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the {@link HttpClient} to use for sending a receiving requests to and from the service. * * @param httpClient HttpClient to use for requests. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent. * * @param pipelinePolicy a pipeline policy * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. */ public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * @param retryOptions The options used to configure retry behavior. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. 
*/ public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from {@link * * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } }
class EncryptedBlobClientBuilder { private final ClientLogger logger = new ClientLogger(EncryptedBlobClientBuilder.class); private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private SasTokenCredential sasTokenCredential; private HttpClient httpClient; private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); private HttpLogOptions logOptions = new HttpLogOptions(); private RequestRetryOptions retryOptions = new RequestRetryOptions(); private HttpPipeline httpPipeline; private Configuration configuration; private AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; /** * Creates a new instance of the EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder() { } private AzureBlobStorageImpl constructImpl() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (ImplUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); if (httpPipeline != null) { return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(httpPipeline) .version(serviceVersion.getVersion()) .build(); } String userAgentName = BlobCryptographyConfiguration.NAME; String userAgentVersion = BlobCryptographyConfiguration.VERSION; Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver)); policies.add(new UserAgentPolicy(userAgentName, userAgentVersion, userAgentConfiguration, serviceVersion)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SasTokenCredentialPolicy(sasTokenCredential)); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .version(serviceVersion.getVersion()) .build(); } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient} * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. 
*/ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient} * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { return new EncryptedBlobAsyncClient(constructImpl(), snapshot, accountName, keyWrapper, keyWrapAlgorithm); } protected void addOptionalEncryptionPolicy(List<HttpPipelinePolicy> policies) { BlobDecryptionPolicy decryptionPolicy = new BlobDecryptionPolicy(keyWrapper, keyResolver); policies.add(decryptionPolicy); } /** * Sets the encryption key parameters for the client * * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption * key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw logger.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw logger.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. 
*/ public EncryptedBlobClientBuilder sasToken(String sasToken) { this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, "'sasToken' cannot be null.")); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Clears the credential used to authorize the request. * * <p>This is for blobs that are publicly accessible.</p> * * @return the updated EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder setAnonymousAccess() { this.storageSharedKeyCredential = null; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated EncryptedBlobClientBuilder * @throws IllegalArgumentException If {@code connectionString} is invalid. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = parts.getScheme() + ": this.containerName = parts.getBlobContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); String sasToken = parts.getSasQueryParameters().encode(); if (!ImplUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob. * * @param blobName Name of the blob. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public EncryptedBlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot identifier of the blob. * * @param snapshot Snapshot identifier for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the {@link HttpClient} to use for sending a receiving requests to and from the service. * * @param httpClient HttpClient to use for requests. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent. * * @param pipelinePolicy a pipeline policy * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. */ public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * @param retryOptions The options used to configure retry behavior. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. 
*/ public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from {@link * * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } }
This should just delegate to the next (fullest) overload so the URL formatting/encoding logic is implemented in exactly one place and cannot silently diverge between the two methods.
/**
 * Adds a delete blob operation to the batch.
 *
 * @param containerName The container of the blob.
 * @param blobName The name of the blob.
 * @return a {@link Response} that will be used to associate this operation to the response when the batch is
 * submitted.
 * @throws UnsupportedOperationException If this batch has already added an operation of another type.
 */
public Response<Void> deleteBlob(String containerName, String blobName) {
    // Delegate to the fullest overload so the path formatting/encoding logic lives in exactly one place.
    return deleteBlob(containerName, blobName, null, null);
}
return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName,
/**
 * Adds a delete blob operation to the batch.
 *
 * @param containerName The container of the blob.
 * @param blobName The name of the blob.
 * @return a {@link Response} that will be used to associate this operation to the response when the batch is
 * submitted.
 * @throws UnsupportedOperationException If this batch has already added an operation of another type.
 */
public Response<Void> deleteBlob(String containerName, String blobName) {
    // Delegate to the fullest overload so the path formatting/encoding logic lives in exactly one place.
    return deleteBlob(containerName, blobName, null, null);
}
class BlobBatch {
    private static final String X_MS_VERSION = "x-ms-version";
    private static final String BATCH_REQUEST_CONTENT_ID = "Batch-Request-Content-Id";
    private static final String BATCH_REQUEST_URL_PATH = "Batch-Request-Url-Path";
    private static final String CONTENT_ID = "Content-Id";
    private static final String BATCH_BOUNDARY_TEMPLATE = "batch_%s";
    private static final String REQUEST_CONTENT_TYPE_TEMPLATE = "multipart/mixed; boundary=%s";
    private static final String BATCH_OPERATION_CONTENT_TYPE = "Content-Type: application/http";
    private static final String BATCH_OPERATION_CONTENT_TRANSFER_ENCODING = "Content-Transfer-Encoding: binary";
    private static final String BATCH_OPERATION_CONTENT_ID_TEMPLATE = "Content-ID: %d";
    private static final String HTTP_VERSION = "HTTP/1.1";
    private static final String OPERATION_TEMPLATE = "%s %s %s";
    private static final String HEADER_TEMPLATE = "%s: %s";
    private static final String PATH_TEMPLATE = "%s/%s";

    /*
     * Track the status codes expected for the batching operations here as the batch body does not get parsed in
     * Azure Core where this information is maintained.
     */
    private static final int[] EXPECTED_DELETE_STATUS_CODES = {202};
    private static final int[] EXPECTED_SET_TIER_STATUS_CODES = {200, 202};

    private final ClientLogger logger = new ClientLogger(BlobBatch.class);

    private final BlobAsyncClient blobAsyncClient;
    private final Deque<Mono<? extends Response<?>>> batchOperationQueue;
    private final List<ByteBuffer> batchRequest;
    private final Map<Integer, BlobBatchOperationResponse<?>> batchMapping;

    private final AtomicInteger contentId;
    private final String batchBoundary;
    private final String contentType;

    // Once set, all subsequent operations must be of the same type (homogeneous batch).
    private BlobBatchType batchType;

    BlobBatch(String accountUrl, HttpPipeline pipeline) {
        this.contentId = new AtomicInteger();
        this.batchBoundary = String.format(BATCH_BOUNDARY_TEMPLATE, UUID.randomUUID());
        this.contentType = String.format(REQUEST_CONTENT_TYPE_TEMPLATE, batchBoundary);

        boolean batchHeadersPolicySet = false;
        HttpPipelineBuilder batchPipelineBuilder = new HttpPipelineBuilder().httpClient(this::setupBatchOperation);
        for (int i = 0; i < pipeline.getPolicyCount(); i++) {
            HttpPipelinePolicy policy = pipeline.getPolicy(i);

            if (policy instanceof StorageSharedKeyCredentialPolicy) {
                batchHeadersPolicySet = true;
                // Cleanse headers and rewrite the URL immediately before signing so the signature is computed over
                // the sub-request's final form.
                batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl);
            }

            batchPipelineBuilder.policies(policy);
        }

        if (!batchHeadersPolicySet) {
            batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl);
        }

        this.blobAsyncClient = new BlobClientBuilder()
            .endpoint(accountUrl)
            .blobName("")
            .pipeline(batchPipelineBuilder.build())
            .buildAsyncClient();

        this.batchOperationQueue = new ConcurrentLinkedDeque<>();
        this.batchRequest = new ArrayList<>();
        this.batchMapping = new ConcurrentHashMap<>();
    }

    /**
     * Adds a delete blob operation to the batch.
     *
     * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility#urlEncode(String)} method.
     *
     * <p><strong>Code sample</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob#String-String}
     *
     * @param containerName The container of the blob.
     * @param blobName The name of the blob.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> deleteBlob(String containerName, String blobName) {
        // Delegate to the fullest overload so the path formatting/encoding logic lives in exactly one place.
        return deleteBlob(containerName, blobName, null, null);
    }

    /**
     * Adds a delete blob operation to the batch.
     *
     * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility#urlEncode(String)} method.
     *
     * <p><strong>Code sample</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob#String-String-DeleteSnapshotsOptionType-BlobRequestConditions}
     *
     * @param containerName The container of the blob.
     * @param blobName The name of the blob.
     * @param deleteOptions Delete options for the blob and its snapshots.
     * @param blobRequestConditions Additional access conditions that must be met to allow this operation.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> deleteBlob(String containerName, String blobName, DeleteSnapshotsOptionType deleteOptions,
        BlobRequestConditions blobRequestConditions) {
        return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName,
            Utility.urlEncode(Utility.urlDecode(blobName))), deleteOptions, blobRequestConditions);
    }

    /**
     * Adds a delete blob operation to the batch.
     *
     * <p><strong>Code sample</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob#String}
     *
     * @param blobUrl URL of the blob.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> deleteBlob(String blobUrl) {
        return deleteBlobHelper(getUrlPath(blobUrl), null, null);
    }

    /**
     * Adds a delete blob operation to the batch.
     *
     * <p><strong>Code sample</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob#String-DeleteSnapshotsOptionType-BlobRequestConditions}
     *
     * @param blobUrl URL of the blob.
     * @param deleteOptions Delete options for the blob and its snapshots.
     * @param blobRequestConditions Additional access conditions that must be met to allow this operation.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> deleteBlob(String blobUrl, DeleteSnapshotsOptionType deleteOptions,
        BlobRequestConditions blobRequestConditions) {
        return deleteBlobHelper(getUrlPath(blobUrl), deleteOptions, blobRequestConditions);
    }

    // Shared implementation for all delete overloads; records the operation and its expected status codes.
    private Response<Void> deleteBlobHelper(String urlPath, DeleteSnapshotsOptionType deleteOptions,
        BlobRequestConditions blobRequestConditions) {
        setBatchType(BlobBatchType.DELETE);
        return createBatchOperation(blobAsyncClient.deleteWithResponse(deleteOptions, blobRequestConditions),
            urlPath, EXPECTED_DELETE_STATUS_CODES);
    }

    /**
     * Adds a set tier operation to the batch.
     *
     * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility#urlEncode(String)} method.
     *
     * <p><strong>Code sample</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier#String-String-AccessTier}
     *
     * @param containerName The container of the blob.
     * @param blobName The name of the blob.
     * @param accessTier The tier to set on the blob.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier) {
        return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName,
            Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, null);
    }

    /**
     * Adds a set tier operation to the batch.
     *
     * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility#urlEncode(String)} method.
     *
     * <p><strong>Code sample</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier#String-String-AccessTier-String}
     *
     * @param containerName The container of the blob.
     * @param blobName The name of the blob.
     * @param accessTier The tier to set on the blob.
     * @param leaseId The lease ID the active lease on the blob must match.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier,
        String leaseId) {
        return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName,
            Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, leaseId);
    }

    /**
     * Adds a set tier operation to the batch.
     *
     * <p><strong>Code sample</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier#String-AccessTier}
     *
     * @param blobUrl URL of the blob.
     * @param accessTier The tier to set on the blob.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier) {
        return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, null);
    }

    /**
     * Adds a set tier operation to the batch.
     *
     * <p><strong>Code sample</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier#String-AccessTier-String}
     *
     * @param blobUrl URL of the blob.
     * @param accessTier The tier to set on the blob.
     * @param leaseId The lease ID the active lease on the blob must match.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier, String leaseId) {
        return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, leaseId);
    }

    // Shared implementation for all set-tier overloads; records the operation and its expected status codes.
    private Response<Void> setBlobAccessTierHelper(String urlPath, AccessTier accessTier, String leaseId) {
        setBatchType(BlobBatchType.SET_TIER);
        return createBatchOperation(blobAsyncClient.setAccessTierWithResponse(accessTier, null, leaseId),
            urlPath, EXPECTED_SET_TIER_STATUS_CODES);
    }

    /*
     * Queues the deferred operation and hands back a placeholder response; the Content-Id carried in the subscriber
     * context is what ties the eventual sub-response back to this placeholder.
     */
    private <T> Response<T> createBatchOperation(Mono<Response<T>> response, String urlPath,
        int... expectedStatusCodes) {
        int id = contentId.getAndIncrement();
        batchOperationQueue.add(response
            .subscriberContext(Context.of(BATCH_REQUEST_CONTENT_ID, id, BATCH_REQUEST_URL_PATH, urlPath)));

        BlobBatchOperationResponse<T> batchOperationResponse = new BlobBatchOperationResponse<>(expectedStatusCodes);
        batchMapping.put(id, batchOperationResponse);
        return batchOperationResponse;
    }

    private String getUrlPath(String url) {
        return UrlBuilder.parse(url).getPath();
    }

    /*
     * Records the batch's operation type on first use; rejects a mismatching type since batches must be homogeneous.
     */
    private void setBatchType(BlobBatchType batchType) {
        if (this.batchType == null) {
            this.batchType = batchType;
        } else if (this.batchType != batchType) {
            throw logger.logExceptionAsError(new UnsupportedOperationException(String.format(Locale.ROOT,
                "'BlobBatch' only supports homogeneous operations and is a %s batch.", this.batchType)));
        }
    }

    Flux<ByteBuffer> getBody() {
        if (batchOperationQueue.isEmpty()) {
            throw logger.logExceptionAsError(
                new UnsupportedOperationException("Empty batch requests aren't allowed."));
        }

        // Triggering the queued operations routes each through setupBatchOperation, which appends its serialized
        // form to batchRequest.
        Disposable disposable = Flux.fromStream(batchOperationQueue.stream())
            .flatMap(batchOperation -> batchOperation)
            .subscribe();

        /* Wait until the 'Flux' is disposed of (aka complete) instead of blocking as this will prevent Reactor from
         * throwing an exception if this was ran in a Reactor thread.
         */
        while (!disposable.isDisposed()) {
        }

        this.batchRequest.add(ByteBuffer.wrap(
            String.format("--%s--%s", batchBoundary, BlobBatchHelper.HTTP_NEWLINE)
                .getBytes(StandardCharsets.UTF_8)));

        return Flux.fromIterable(batchRequest);
    }

    long getContentLength() {
        long contentLength = 0;

        for (ByteBuffer request : batchRequest) {
            contentLength += request.remaining();
        }

        return contentLength;
    }

    String getContentType() {
        return contentType;
    }

    BlobBatchOperationResponse<?> getBatchRequest(int contentId) {
        return batchMapping.get(contentId);
    }

    int getOperationCount() {
        return batchMapping.size();
    }

    /*
     * This performs a cleanup operation that would be handled when the request is sent through Netty or OkHttp.
     * Additionally, it removes the "x-ms-version" header from the request as batch operation requests cannot have
     * this and it adds the header "Content-Id" that allows the request to be mapped to the response.
     */
    private Mono<HttpResponse> cleanseHeaders(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
        context.getHttpRequest().getHeaders().remove(X_MS_VERSION);

        // Drop null-valued headers; they would serialize as invalid header lines.
        Map<String, String> headers = context.getHttpRequest().getHeaders().toMap();
        headers.entrySet().removeIf(header -> header.getValue() == null);
        context.getHttpRequest().setHeaders(new HttpHeaders(headers));

        context.getHttpRequest().setHeader(CONTENT_ID, context.getData(BATCH_REQUEST_CONTENT_ID).get().toString());

        return next.process();
    }

    /*
     * This performs changing the request URL to the value passed through the pipeline context. This policy is used
     * in place of constructing a new client for each batch request that is being sent.
     */
    private Mono<HttpResponse> setRequestUrl(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
        try {
            UrlBuilder requestUrl = UrlBuilder.parse(context.getHttpRequest().getUrl());
            requestUrl.setPath(context.getData(BATCH_REQUEST_URL_PATH).get().toString());
            context.getHttpRequest().setUrl(requestUrl.toURL());
        } catch (MalformedURLException ex) {
            throw logger.logExceptionAsError(Exceptions.propagate(new IllegalStateException(ex)));
        }

        return next.process();
    }

    /*
     * This will "send" the batch operation request when triggered, it simply acts as a way to build and write the
     * batch operation into the overall request and then returns nothing as the response.
     */
    private Mono<HttpResponse> setupBatchOperation(HttpRequest request) {
        return Mono.fromRunnable(() -> {
            int contentId = Integer.parseInt(request.getHeaders().remove(CONTENT_ID).getValue());

            StringBuilder batchRequestBuilder = new StringBuilder();
            appendWithNewline(batchRequestBuilder, "--" + batchBoundary);
            appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TYPE);
            appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TRANSFER_ENCODING);
            appendWithNewline(batchRequestBuilder, String.format(BATCH_OPERATION_CONTENT_ID_TEMPLATE, contentId));
            batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE);

            String method = request.getHttpMethod().toString();
            String urlPath = request.getUrl().getPath();
            String urlQuery = request.getUrl().getQuery();
            if (!ImplUtils.isNullOrEmpty(urlQuery)) {
                urlPath = urlPath + "?" + urlQuery;
            }
            appendWithNewline(batchRequestBuilder,
                String.format(OPERATION_TEMPLATE, method, urlPath, HTTP_VERSION));

            request.getHeaders().stream()
                .filter(header -> !X_MS_VERSION.equalsIgnoreCase(header.getName()))
                .forEach(header -> appendWithNewline(batchRequestBuilder,
                    String.format(HEADER_TEMPLATE, header.getName(), header.getValue())));

            batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE);

            batchRequest.add(ByteBuffer.wrap(batchRequestBuilder.toString().getBytes(StandardCharsets.UTF_8)));
            batchMapping.get(contentId).setRequest(request);
        });
    }

    private void appendWithNewline(StringBuilder stringBuilder, String value) {
        stringBuilder.append(value).append(BlobBatchHelper.HTTP_NEWLINE);
    }
}
class BlobBatch { private static final String X_MS_VERSION = "x-ms-version"; private static final String BATCH_REQUEST_CONTENT_ID = "Batch-Request-Content-Id"; private static final String BATCH_REQUEST_URL_PATH = "Batch-Request-Url-Path"; private static final String CONTENT_ID = "Content-Id"; private static final String BATCH_BOUNDARY_TEMPLATE = "batch_%s"; private static final String REQUEST_CONTENT_TYPE_TEMPLATE = "multipart/mixed; boundary=%s"; private static final String BATCH_OPERATION_CONTENT_TYPE = "Content-Type: application/http"; private static final String BATCH_OPERATION_CONTENT_TRANSFER_ENCODING = "Content-Transfer-Encoding: binary"; private static final String BATCH_OPERATION_CONTENT_ID_TEMPLATE = "Content-ID: %d"; private static final String HTTP_VERSION = "HTTP/1.1"; private static final String OPERATION_TEMPLATE = "%s %s %s"; private static final String HEADER_TEMPLATE = "%s: %s"; private static final String PATH_TEMPLATE = "%s/%s"; /* * Track the status codes expected for the batching operations here as the batch body does not get parsed in * Azure Core where this information is maintained. */ private static final int[] EXPECTED_DELETE_STATUS_CODES = {202}; private static final int[] EXPECTED_SET_TIER_STATUS_CODES = {200, 202}; private final ClientLogger logger = new ClientLogger(BlobBatch.class); private final BlobAsyncClient blobAsyncClient; private final Deque<Mono<? 
extends Response<?>>> batchOperationQueue; private final List<ByteBuffer> batchRequest; private final Map<Integer, BlobBatchOperationResponse<?>> batchMapping; private final AtomicInteger contentId; private final String batchBoundary; private final String contentType; private BlobBatchType batchType; BlobBatch(String accountUrl, HttpPipeline pipeline) { this.contentId = new AtomicInteger(); this.batchBoundary = String.format(BATCH_BOUNDARY_TEMPLATE, UUID.randomUUID()); this.contentType = String.format(REQUEST_CONTENT_TYPE_TEMPLATE, batchBoundary); boolean batchHeadersPolicySet = false; HttpPipelineBuilder batchPipelineBuilder = new HttpPipelineBuilder().httpClient(this::setupBatchOperation); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy policy = pipeline.getPolicy(i); if (policy instanceof StorageSharedKeyCredentialPolicy) { batchHeadersPolicySet = true; batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } batchPipelineBuilder.policies(policy); } if (!batchHeadersPolicySet) { batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } this.blobAsyncClient = new BlobClientBuilder() .endpoint(accountUrl) .blobName("") .pipeline(batchPipelineBuilder.build()) .buildAsyncClient(); this.batchOperationQueue = new ConcurrentLinkedDeque<>(); this.batchRequest = new ArrayList<>(); this.batchMapping = new ConcurrentHashMap<>(); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ /** * Adds a delete blob operation to the batch. 
* * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String containerName, String blobName, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), deleteOptions, blobRequestConditions); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String blobUrl) { return deleteBlobHelper(getUrlPath(blobUrl), null, null); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. 
* @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String blobUrl, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(getUrlPath(blobUrl), deleteOptions, blobRequestConditions); } private Response<Void> deleteBlobHelper(String urlPath, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { setBatchType(BlobBatchType.DELETE); return createBatchOperation(blobAsyncClient.deleteWithResponse(deleteOptions, blobRequestConditions), urlPath, EXPECTED_DELETE_STATUS_CODES); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, null); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param accessTier The tier to set on the blob. * @param leaseId The lease ID the active lease on the blob must match. 
* @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, leaseId); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, null); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param accessTier The tier to set on the blob. * @param leaseId The lease ID the active lease on the blob must match. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. 
*/ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, leaseId); } private Response<Void> setBlobAccessTierHelper(String urlPath, AccessTier accessTier, String leaseId) { setBatchType(BlobBatchType.SET_TIER); return createBatchOperation(blobAsyncClient.setAccessTierWithResponse(accessTier, null, leaseId), urlPath, EXPECTED_SET_TIER_STATUS_CODES); } private <T> Response<T> createBatchOperation(Mono<Response<T>> response, String urlPath, int... expectedStatusCodes) { int id = contentId.getAndIncrement(); batchOperationQueue.add(response .subscriberContext(Context.of(BATCH_REQUEST_CONTENT_ID, id, BATCH_REQUEST_URL_PATH, urlPath))); BlobBatchOperationResponse<T> batchOperationResponse = new BlobBatchOperationResponse<>(expectedStatusCodes); batchMapping.put(id, batchOperationResponse); return batchOperationResponse; } private String getUrlPath(String url) { return UrlBuilder.parse(url).getPath(); } private void setBatchType(BlobBatchType batchType) { if (this.batchType == null) { this.batchType = batchType; } else if (this.batchType != batchType) { throw logger.logExceptionAsError(new UnsupportedOperationException(String.format(Locale.ROOT, "'BlobBatch' only supports homogeneous operations and is a %s batch.", this.batchType))); } } Flux<ByteBuffer> getBody() { if (batchOperationQueue.isEmpty()) { throw logger.logExceptionAsError(new UnsupportedOperationException("Empty batch requests aren't allowed.")); } Disposable disposable = Flux.fromStream(batchOperationQueue.stream()) .flatMap(batchOperation -> batchOperation) .subscribe(); /* Wait until the 'Flux' is disposed of (aka complete) instead of blocking as this will prevent Reactor from * throwing an exception if this was ran in a Reactor thread. 
*/ while (!disposable.isDisposed()) { } this.batchRequest.add(ByteBuffer.wrap( String.format("--%s--%s", batchBoundary, BlobBatchHelper.HTTP_NEWLINE).getBytes(StandardCharsets.UTF_8))); return Flux.fromIterable(batchRequest); } long getContentLength() { long contentLength = 0; for (ByteBuffer request : batchRequest) { contentLength += request.remaining(); } return contentLength; } String getContentType() { return contentType; } BlobBatchOperationResponse<?> getBatchRequest(int contentId) { return batchMapping.get(contentId); } int getOperationCount() { return batchMapping.size(); } /* * This performs a cleanup operation that would be handled when the request is sent through Netty or OkHttp. * Additionally, it removes the "x-ms-version" header from the request as batch operation requests cannot have this * and it adds the header "Content-Id" that allows the request to be mapped to the response. */ private Mono<HttpResponse> cleanseHeaders(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { context.getHttpRequest().getHeaders().remove(X_MS_VERSION); Map<String, String> headers = context.getHttpRequest().getHeaders().toMap(); headers.entrySet().removeIf(header -> header.getValue() == null); context.getHttpRequest().setHeaders(new HttpHeaders(headers)); context.getHttpRequest().setHeader(CONTENT_ID, context.getData(BATCH_REQUEST_CONTENT_ID).get().toString()); return next.process(); } /* * This performs changing the request URL to the value passed through the pipeline context. This policy is used in * place of constructing a new client for each batch request that is being sent. 
*/ private Mono<HttpResponse> setRequestUrl(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { try { UrlBuilder requestUrl = UrlBuilder.parse(context.getHttpRequest().getUrl()); requestUrl.setPath(context.getData(BATCH_REQUEST_URL_PATH).get().toString()); context.getHttpRequest().setUrl(requestUrl.toURL()); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new IllegalStateException(ex))); } return next.process(); } /* * This will "send" the batch operation request when triggered, it simply acts as a way to build and write the * batch operation into the overall request and then returns nothing as the response. */ private Mono<HttpResponse> setupBatchOperation(HttpRequest request) { return Mono.fromRunnable(() -> { int contentId = Integer.parseInt(request.getHeaders().remove(CONTENT_ID).getValue()); StringBuilder batchRequestBuilder = new StringBuilder(); appendWithNewline(batchRequestBuilder, "--" + batchBoundary); appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TYPE); appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TRANSFER_ENCODING); appendWithNewline(batchRequestBuilder, String.format(BATCH_OPERATION_CONTENT_ID_TEMPLATE, contentId)); batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE); String method = request.getHttpMethod().toString(); String urlPath = request.getUrl().getPath(); String urlQuery = request.getUrl().getQuery(); if (!ImplUtils.isNullOrEmpty(urlQuery)) { urlPath = urlPath + "?" 
+ urlQuery; } appendWithNewline(batchRequestBuilder, String.format(OPERATION_TEMPLATE, method, urlPath, HTTP_VERSION)); request.getHeaders().stream() .filter(header -> !X_MS_VERSION.equalsIgnoreCase(header.getName())) .forEach(header -> appendWithNewline(batchRequestBuilder, String.format(HEADER_TEMPLATE, header.getName(), header.getValue()))); batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE); batchRequest.add(ByteBuffer.wrap(batchRequestBuilder.toString().getBytes(StandardCharsets.UTF_8))); batchMapping.get(contentId).setRequest(request); }); } private void appendWithNewline(StringBuilder stringBuilder, String value) { stringBuilder.append(value).append(BlobBatchHelper.HTTP_NEWLINE); } }
Same comment about calling the next overload.
public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, null); }
return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName,
public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, null); }
class BlobBatch { private static final String X_MS_VERSION = "x-ms-version"; private static final String BATCH_REQUEST_CONTENT_ID = "Batch-Request-Content-Id"; private static final String BATCH_REQUEST_URL_PATH = "Batch-Request-Url-Path"; private static final String CONTENT_ID = "Content-Id"; private static final String BATCH_BOUNDARY_TEMPLATE = "batch_%s"; private static final String REQUEST_CONTENT_TYPE_TEMPLATE = "multipart/mixed; boundary=%s"; private static final String BATCH_OPERATION_CONTENT_TYPE = "Content-Type: application/http"; private static final String BATCH_OPERATION_CONTENT_TRANSFER_ENCODING = "Content-Transfer-Encoding: binary"; private static final String BATCH_OPERATION_CONTENT_ID_TEMPLATE = "Content-ID: %d"; private static final String HTTP_VERSION = "HTTP/1.1"; private static final String OPERATION_TEMPLATE = "%s %s %s"; private static final String HEADER_TEMPLATE = "%s: %s"; private static final String PATH_TEMPLATE = "%s/%s"; /* * Track the status codes expected for the batching operations here as the batch body does not get parsed in * Azure Core where this information is maintained. */ private static final int[] EXPECTED_DELETE_STATUS_CODES = {202}; private static final int[] EXPECTED_SET_TIER_STATUS_CODES = {200, 202}; private final ClientLogger logger = new ClientLogger(BlobBatch.class); private final BlobAsyncClient blobAsyncClient; private final Deque<Mono<? 
extends Response<?>>> batchOperationQueue; private final List<ByteBuffer> batchRequest; private final Map<Integer, BlobBatchOperationResponse<?>> batchMapping; private final AtomicInteger contentId; private final String batchBoundary; private final String contentType; private BlobBatchType batchType; BlobBatch(String accountUrl, HttpPipeline pipeline) { this.contentId = new AtomicInteger(); this.batchBoundary = String.format(BATCH_BOUNDARY_TEMPLATE, UUID.randomUUID()); this.contentType = String.format(REQUEST_CONTENT_TYPE_TEMPLATE, batchBoundary); boolean batchHeadersPolicySet = false; HttpPipelineBuilder batchPipelineBuilder = new HttpPipelineBuilder().httpClient(this::setupBatchOperation); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy policy = pipeline.getPolicy(i); if (policy instanceof StorageSharedKeyCredentialPolicy) { batchHeadersPolicySet = true; batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } batchPipelineBuilder.policies(policy); } if (!batchHeadersPolicySet) { batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } this.blobAsyncClient = new BlobClientBuilder() .endpoint(accountUrl) .blobName("") .pipeline(batchPipelineBuilder.build()) .buildAsyncClient(); this.batchOperationQueue = new ConcurrentLinkedDeque<>(); this.batchRequest = new ArrayList<>(); this.batchMapping = new ConcurrentHashMap<>(); } /** * Adds a delete blob operation to the batch. * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. 
*/ public Response<Void> deleteBlob(String containerName, String blobName) { return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), null, null); } /** * Adds a delete blob operation to the batch. * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String containerName, String blobName, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), deleteOptions, blobRequestConditions); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String blobUrl) { return deleteBlobHelper(getUrlPath(blobUrl), null, null); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. 
* @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String blobUrl, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(getUrlPath(blobUrl), deleteOptions, blobRequestConditions); } private Response<Void> deleteBlobHelper(String urlPath, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { setBatchType(BlobBatchType.DELETE); return createBatchOperation(blobAsyncClient.deleteWithResponse(deleteOptions, blobRequestConditions), urlPath, EXPECTED_DELETE_STATUS_CODES); } /** * Adds a set tier operation to the batch. * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ /** * Adds a set tier operation to the batch. * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param accessTier The tier to set on the blob. 
* @param leaseId The lease ID the active lease on the blob must match. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, leaseId); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, null); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. * @param accessTier The tier to set on the blob. * @param leaseId The lease ID the active lease on the blob must match. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. 
*/ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, leaseId); } private Response<Void> setBlobAccessTierHelper(String urlPath, AccessTier accessTier, String leaseId) { setBatchType(BlobBatchType.SET_TIER); return createBatchOperation(blobAsyncClient.setAccessTierWithResponse(accessTier, null, leaseId), urlPath, EXPECTED_SET_TIER_STATUS_CODES); } private <T> Response<T> createBatchOperation(Mono<Response<T>> response, String urlPath, int... expectedStatusCodes) { int id = contentId.getAndIncrement(); batchOperationQueue.add(response .subscriberContext(Context.of(BATCH_REQUEST_CONTENT_ID, id, BATCH_REQUEST_URL_PATH, urlPath))); BlobBatchOperationResponse<T> batchOperationResponse = new BlobBatchOperationResponse<>(expectedStatusCodes); batchMapping.put(id, batchOperationResponse); return batchOperationResponse; } private String getUrlPath(String url) { return UrlBuilder.parse(url).getPath(); } private void setBatchType(BlobBatchType batchType) { if (this.batchType == null) { this.batchType = batchType; } else if (this.batchType != batchType) { throw logger.logExceptionAsError(new UnsupportedOperationException(String.format(Locale.ROOT, "'BlobBatch' only supports homogeneous operations and is a %s batch.", this.batchType))); } } Flux<ByteBuffer> getBody() { if (batchOperationQueue.isEmpty()) { throw logger.logExceptionAsError(new UnsupportedOperationException("Empty batch requests aren't allowed.")); } Disposable disposable = Flux.fromStream(batchOperationQueue.stream()) .flatMap(batchOperation -> batchOperation) .subscribe(); /* Wait until the 'Flux' is disposed of (aka complete) instead of blocking as this will prevent Reactor from * throwing an exception if this was ran in a Reactor thread. 
*/ while (!disposable.isDisposed()) { } this.batchRequest.add(ByteBuffer.wrap( String.format("--%s--%s", batchBoundary, BlobBatchHelper.HTTP_NEWLINE).getBytes(StandardCharsets.UTF_8))); return Flux.fromIterable(batchRequest); } long getContentLength() { long contentLength = 0; for (ByteBuffer request : batchRequest) { contentLength += request.remaining(); } return contentLength; } String getContentType() { return contentType; } BlobBatchOperationResponse<?> getBatchRequest(int contentId) { return batchMapping.get(contentId); } int getOperationCount() { return batchMapping.size(); } /* * This performs a cleanup operation that would be handled when the request is sent through Netty or OkHttp. * Additionally, it removes the "x-ms-version" header from the request as batch operation requests cannot have this * and it adds the header "Content-Id" that allows the request to be mapped to the response. */ private Mono<HttpResponse> cleanseHeaders(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { context.getHttpRequest().getHeaders().remove(X_MS_VERSION); Map<String, String> headers = context.getHttpRequest().getHeaders().toMap(); headers.entrySet().removeIf(header -> header.getValue() == null); context.getHttpRequest().setHeaders(new HttpHeaders(headers)); context.getHttpRequest().setHeader(CONTENT_ID, context.getData(BATCH_REQUEST_CONTENT_ID).get().toString()); return next.process(); } /* * This performs changing the request URL to the value passed through the pipeline context. This policy is used in * place of constructing a new client for each batch request that is being sent. 
*/ private Mono<HttpResponse> setRequestUrl(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { try { UrlBuilder requestUrl = UrlBuilder.parse(context.getHttpRequest().getUrl()); requestUrl.setPath(context.getData(BATCH_REQUEST_URL_PATH).get().toString()); context.getHttpRequest().setUrl(requestUrl.toURL()); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new IllegalStateException(ex))); } return next.process(); } /* * This will "send" the batch operation request when triggered, it simply acts as a way to build and write the * batch operation into the overall request and then returns nothing as the response. */ private Mono<HttpResponse> setupBatchOperation(HttpRequest request) { return Mono.fromRunnable(() -> { int contentId = Integer.parseInt(request.getHeaders().remove(CONTENT_ID).getValue()); StringBuilder batchRequestBuilder = new StringBuilder(); appendWithNewline(batchRequestBuilder, "--" + batchBoundary); appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TYPE); appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TRANSFER_ENCODING); appendWithNewline(batchRequestBuilder, String.format(BATCH_OPERATION_CONTENT_ID_TEMPLATE, contentId)); batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE); String method = request.getHttpMethod().toString(); String urlPath = request.getUrl().getPath(); String urlQuery = request.getUrl().getQuery(); if (!ImplUtils.isNullOrEmpty(urlQuery)) { urlPath = urlPath + "?" 
+ urlQuery; } appendWithNewline(batchRequestBuilder, String.format(OPERATION_TEMPLATE, method, urlPath, HTTP_VERSION)); request.getHeaders().stream() .filter(header -> !X_MS_VERSION.equalsIgnoreCase(header.getName())) .forEach(header -> appendWithNewline(batchRequestBuilder, String.format(HEADER_TEMPLATE, header.getName(), header.getValue()))); batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE); batchRequest.add(ByteBuffer.wrap(batchRequestBuilder.toString().getBytes(StandardCharsets.UTF_8))); batchMapping.get(contentId).setRequest(request); }); } private void appendWithNewline(StringBuilder stringBuilder, String value) { stringBuilder.append(value).append(BlobBatchHelper.HTTP_NEWLINE); } }
/**
 * Collects blob operations (delete or set-tier) into a single multipart batch request.
 * A batch is homogeneous: it only supports one operation type at a time.
 */
class BlobBatch {
    private static final String X_MS_VERSION = "x-ms-version";
    private static final String BATCH_REQUEST_CONTENT_ID = "Batch-Request-Content-Id";
    private static final String BATCH_REQUEST_URL_PATH = "Batch-Request-Url-Path";
    private static final String CONTENT_ID = "Content-Id";
    private static final String BATCH_BOUNDARY_TEMPLATE = "batch_%s";
    private static final String REQUEST_CONTENT_TYPE_TEMPLATE = "multipart/mixed; boundary=%s";
    private static final String BATCH_OPERATION_CONTENT_TYPE = "Content-Type: application/http";
    private static final String BATCH_OPERATION_CONTENT_TRANSFER_ENCODING = "Content-Transfer-Encoding: binary";
    private static final String BATCH_OPERATION_CONTENT_ID_TEMPLATE = "Content-ID: %d";
    private static final String HTTP_VERSION = "HTTP/1.1";
    private static final String OPERATION_TEMPLATE = "%s %s %s";
    private static final String HEADER_TEMPLATE = "%s: %s";
    private static final String PATH_TEMPLATE = "%s/%s";

    /*
     * Track the status codes expected for the batching operations here as the batch body does not get parsed in
     * Azure Core where this information is maintained.
     */
    private static final int[] EXPECTED_DELETE_STATUS_CODES = {202};
    private static final int[] EXPECTED_SET_TIER_STATUS_CODES = {200, 202};

    private final ClientLogger logger = new ClientLogger(BlobBatch.class);

    private final BlobAsyncClient blobAsyncClient;

    private final Deque<Mono<? extends Response<?>>> batchOperationQueue;
    private final List<ByteBuffer> batchRequest;
    private final Map<Integer, BlobBatchOperationResponse<?>> batchMapping;

    private final AtomicInteger contentId;
    private final String batchBoundary;
    private final String contentType;

    private BlobBatchType batchType;

    BlobBatch(String accountUrl, HttpPipeline pipeline) {
        this.contentId = new AtomicInteger();
        this.batchBoundary = String.format(BATCH_BOUNDARY_TEMPLATE, UUID.randomUUID());
        this.contentType = String.format(REQUEST_CONTENT_TYPE_TEMPLATE, batchBoundary);

        boolean batchHeadersPolicySet = false;
        HttpPipelineBuilder batchPipelineBuilder = new HttpPipelineBuilder().httpClient(this::setupBatchOperation);
        for (int i = 0; i < pipeline.getPolicyCount(); i++) {
            HttpPipelinePolicy policy = pipeline.getPolicy(i);

            // Headers must be cleansed and the URL rewritten before the shared key signature is computed.
            if (policy instanceof StorageSharedKeyCredentialPolicy) {
                batchHeadersPolicySet = true;
                batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl);
            }

            batchPipelineBuilder.policies(policy);
        }

        // No credential policy found, append the batch policies at the end of the pipeline.
        if (!batchHeadersPolicySet) {
            batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl);
        }

        this.blobAsyncClient = new BlobClientBuilder()
            .endpoint(accountUrl)
            .blobName("")
            .pipeline(batchPipelineBuilder.build())
            .buildAsyncClient();

        this.batchOperationQueue = new ConcurrentLinkedDeque<>();
        this.batchRequest = new ArrayList<>();
        this.batchMapping = new ConcurrentHashMap<>();
    }

    /**
     * Adds a delete blob operation to the batch.
     *
     * @param containerName The container of the blob.
     * @param blobName The name of the blob.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> deleteBlob(String containerName, String blobName) {
        // Delegate to the full overload so the path construction/encoding lives in one place.
        return deleteBlob(containerName, blobName, null, null);
    }

    /**
     * Adds a delete blob operation to the batch.
     *
     * @param containerName The container of the blob.
     * @param blobName The name of the blob.
     * @param deleteOptions Delete options for the blob and its snapshots.
     * @param blobRequestConditions Additional access conditions that must be met to allow this operation.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> deleteBlob(String containerName, String blobName, DeleteSnapshotsOptionType deleteOptions,
        BlobRequestConditions blobRequestConditions) {
        // Decode then encode so raw and pre-encoded blob names produce the same URL path.
        return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName,
            Utility.urlEncode(Utility.urlDecode(blobName))), deleteOptions, blobRequestConditions);
    }

    /**
     * Adds a delete blob operation to the batch.
     *
     * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> deleteBlob(String blobUrl) {
        return deleteBlob(blobUrl, null, null);
    }

    /**
     * Adds a delete blob operation to the batch.
     *
     * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
     * @param deleteOptions Delete options for the blob and its snapshots.
     * @param blobRequestConditions Additional access conditions that must be met to allow this operation.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> deleteBlob(String blobUrl, DeleteSnapshotsOptionType deleteOptions,
        BlobRequestConditions blobRequestConditions) {
        return deleteBlobHelper(getUrlPath(blobUrl), deleteOptions, blobRequestConditions);
    }

    private Response<Void> deleteBlobHelper(String urlPath, DeleteSnapshotsOptionType deleteOptions,
        BlobRequestConditions blobRequestConditions) {
        setBatchType(BlobBatchType.DELETE);
        return createBatchOperation(blobAsyncClient.deleteWithResponse(deleteOptions, blobRequestConditions),
            urlPath, EXPECTED_DELETE_STATUS_CODES);
    }

    /**
     * Adds a set tier operation to the batch.
     *
     * @param containerName The container of the blob.
     * @param blobName The name of the blob.
     * @param accessTier The tier to set on the blob.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier) {
        // Restored: this overload's documentation existed without an implementation.
        return setBlobAccessTier(containerName, blobName, accessTier, null);
    }

    /**
     * Adds a set tier operation to the batch.
     *
     * @param containerName The container of the blob.
     * @param blobName The name of the blob.
     * @param accessTier The tier to set on the blob.
     * @param leaseId The lease ID the active lease on the blob must match.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier,
        String leaseId) {
        return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName,
            Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, leaseId);
    }

    /**
     * Adds a set tier operation to the batch.
     *
     * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
     * @param accessTier The tier to set on the blob.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier) {
        return setBlobAccessTier(blobUrl, accessTier, null);
    }

    /**
     * Adds a set tier operation to the batch.
     *
     * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
     * @param accessTier The tier to set on the blob.
     * @param leaseId The lease ID the active lease on the blob must match.
     * @return a {@link Response} that will be used to associate this operation to the response when the batch is
     * submitted.
     * @throws UnsupportedOperationException If this batch has already added an operation of another type.
     */
    public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier, String leaseId) {
        return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, leaseId);
    }

    private Response<Void> setBlobAccessTierHelper(String urlPath, AccessTier accessTier, String leaseId) {
        setBatchType(BlobBatchType.SET_TIER);
        return createBatchOperation(blobAsyncClient.setAccessTierWithResponse(accessTier, null, leaseId),
            urlPath, EXPECTED_SET_TIER_STATUS_CODES);
    }

    // Queues the operation's Mono (tagged with its content id and URL path via the subscriber
    // context) and returns the placeholder response the caller can inspect after submission.
    private <T> Response<T> createBatchOperation(Mono<Response<T>> response, String urlPath,
        int... expectedStatusCodes) {
        int id = contentId.getAndIncrement();
        batchOperationQueue.add(response
            .subscriberContext(Context.of(BATCH_REQUEST_CONTENT_ID, id, BATCH_REQUEST_URL_PATH, urlPath)));

        BlobBatchOperationResponse<T> batchOperationResponse = new BlobBatchOperationResponse<>(expectedStatusCodes);
        batchMapping.put(id, batchOperationResponse);
        return batchOperationResponse;
    }

    private String getUrlPath(String url) {
        return UrlBuilder.parse(url).getPath();
    }

    // Enforces that the batch stays homogeneous; the first operation fixes the batch type.
    private void setBatchType(BlobBatchType batchType) {
        if (this.batchType == null) {
            this.batchType = batchType;
        } else if (this.batchType != batchType) {
            throw logger.logExceptionAsError(new UnsupportedOperationException(String.format(Locale.ROOT,
                "'BlobBatch' only supports homogeneous operations and is a %s batch.", this.batchType)));
        }
    }

    Flux<ByteBuffer> getBody() {
        if (batchOperationQueue.isEmpty()) {
            throw logger.logExceptionAsError(new UnsupportedOperationException("Empty batch requests aren't allowed."));
        }

        Disposable disposable = Flux.fromStream(batchOperationQueue.stream())
            .flatMap(batchOperation -> batchOperation)
            .subscribe();

        /* Wait until the 'Flux' is disposed of (aka complete) instead of blocking as this will prevent Reactor from
         * throwing an exception if this was ran in a Reactor thread.
         */
        while (!disposable.isDisposed()) {
            // Yield instead of hot-spinning so the wait doesn't peg a CPU core.
            Thread.yield();
        }

        // Terminating boundary of the multipart body.
        this.batchRequest.add(ByteBuffer.wrap(
            String.format("--%s--%s", batchBoundary, BlobBatchHelper.HTTP_NEWLINE).getBytes(StandardCharsets.UTF_8)));

        return Flux.fromIterable(batchRequest);
    }

    long getContentLength() {
        long contentLength = 0;

        for (ByteBuffer request : batchRequest) {
            contentLength += request.remaining();
        }

        return contentLength;
    }

    String getContentType() {
        return contentType;
    }

    BlobBatchOperationResponse<?> getBatchRequest(int contentId) {
        return batchMapping.get(contentId);
    }

    int getOperationCount() {
        return batchMapping.size();
    }

    /*
     * This performs a cleanup operation that would be handled when the request is sent through Netty or OkHttp.
     * Additionally, it removes the "x-ms-version" header from the request as batch operation requests cannot have
     * this and it adds the header "Content-Id" that allows the request to be mapped to the response.
     */
    private Mono<HttpResponse> cleanseHeaders(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
        // Remove the "x-ms-version" as it shouldn't be included in the batch operation request.
        context.getHttpRequest().getHeaders().remove(X_MS_VERSION);

        // Remove any null headers (this is done in Netty and OkHttp normally).
        Map<String, String> headers = context.getHttpRequest().getHeaders().toMap();
        headers.entrySet().removeIf(header -> header.getValue() == null);

        context.getHttpRequest().setHeaders(new HttpHeaders(headers));

        // Tag the request with its content id so the batch response can be mapped back.
        context.getHttpRequest().setHeader(CONTENT_ID, context.getData(BATCH_REQUEST_CONTENT_ID).get().toString());

        return next.process();
    }

    /*
     * This performs changing the request URL to the value passed through the pipeline context. This policy is used
     * in place of constructing a new client for each batch request that is being sent.
     */
    private Mono<HttpResponse> setRequestUrl(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
        try {
            UrlBuilder requestUrl = UrlBuilder.parse(context.getHttpRequest().getUrl());
            requestUrl.setPath(context.getData(BATCH_REQUEST_URL_PATH).get().toString());

            context.getHttpRequest().setUrl(requestUrl.toURL());
        } catch (MalformedURLException ex) {
            // A malformed URL here is an internal programming error, not caller input.
            throw logger.logExceptionAsError(Exceptions.propagate(new IllegalStateException(ex)));
        }

        return next.process();
    }

    /*
     * This will "send" the batch operation request when triggered, it simply acts as a way to build and write the
     * batch operation into the overall request and then returns nothing as the response.
     */
    private Mono<HttpResponse> setupBatchOperation(HttpRequest request) {
        return Mono.fromRunnable(() -> {
            // The Content-Id header maps this sub-request to its BlobBatchOperationResponse.
            int contentId = Integer.parseInt(request.getHeaders().remove(CONTENT_ID).getValue());

            StringBuilder batchRequestBuilder = new StringBuilder();
            appendWithNewline(batchRequestBuilder, "--" + batchBoundary);
            appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TYPE);
            appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TRANSFER_ENCODING);
            appendWithNewline(batchRequestBuilder, String.format(BATCH_OPERATION_CONTENT_ID_TEMPLATE, contentId));
            batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE);

            String method = request.getHttpMethod().toString();
            String urlPath = request.getUrl().getPath();
            String urlQuery = request.getUrl().getQuery();
            if (!ImplUtils.isNullOrEmpty(urlQuery)) {
                urlPath = urlPath + "?" + urlQuery;
            }
            // Request line of the embedded HTTP request, e.g. "DELETE /container/blob HTTP/1.1".
            appendWithNewline(batchRequestBuilder, String.format(OPERATION_TEMPLATE, method, urlPath, HTTP_VERSION));

            // x-ms-version is not allowed on batch sub-requests; every other header is copied through.
            request.getHeaders().stream()
                .filter(header -> !X_MS_VERSION.equalsIgnoreCase(header.getName()))
                .forEach(header -> appendWithNewline(batchRequestBuilder,
                    String.format(HEADER_TEMPLATE, header.getName(), header.getValue())));

            batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE);

            batchRequest.add(ByteBuffer.wrap(batchRequestBuilder.toString().getBytes(StandardCharsets.UTF_8)));
            batchMapping.get(contentId).setRequest(request);
        });
    }

    private void appendWithNewline(StringBuilder stringBuilder, String value) {
        stringBuilder.append(value).append(BlobBatchHelper.HTTP_NEWLINE);
    }
}
Should we add an overload that accepts an already URL-encoded blob name, so callers can skip the decode/encode round trip?
public BlobUrlParts setBlobName(String blobName) { this.blobName = Utility.urlEncode(Utility.urlDecode(blobName)); return this; }
this.blobName = Utility.urlEncode(Utility.urlDecode(blobName));
public BlobUrlParts setBlobName(String blobName) { this.blobName = Utility.urlEncode(Utility.urlDecode(blobName)); return this; }
class BlobUrlParts { private static final Pattern IP_V4_URL_PATTERN = Pattern .compile("(?:\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})|(?:localhost)"); private final ClientLogger logger = new ClientLogger(BlobUrlParts.class); private String scheme; private String host; private String containerName; private String blobName; private String snapshot; private String accountName; private BlobServiceSasQueryParameters blobServiceSasQueryParameters; private Map<String, String[]> unparsedParameters; /** * Initializes a BlobUrlParts object which helps aid in the construction of a Blob Storage URL. */ public BlobUrlParts() { unparsedParameters = new HashMap<>(); } /** * Gets the accountname, ex. "myaccountname". * * @return the account name. */ public String getAccountName() { return accountName; } /** * Sets the account name. * * @param accountName The account name. * @return the updated BlobURLParts object. */ public BlobUrlParts setAccountName(String accountName) { this.accountName = accountName; return this; } /** * Gets the URL scheme, ex. "https: * * @return the URL scheme. */ public String getScheme() { return scheme; } /** * Sets the URL scheme, ex. "https: * * @param scheme The URL scheme. * @return the updated BlobUrlParts object. */ public BlobUrlParts setScheme(String scheme) { this.scheme = scheme; return this; } /** * Gets the URL host, ex. "account.blob.core.windows.net". * * @return the URL host. */ public String getHost() { return host; } /** * Sets the URL host, ex. "account.blob.core.windows.net". * * @param host The URL host. * @return the updated BlobUrlParts object. */ public BlobUrlParts setHost(String host) { this.host = host; return this; } /** * Gets the container name that will be used as part of the URL path. * * @return the container name. */ public String getBlobContainerName() { return containerName; } /** * Sets the container name that will be used as part of the URL path. * * @param containerName The container nme. 
* @return the updated BlobUrlParts object. */ public BlobUrlParts setContainerName(String containerName) { this.containerName = containerName; return this; } /** * Gets the blob name that will be used as part of the URL path. * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * @return the blob name. */ public String getBlobName() { return blobName; } /** * Sets the blob name that will be used as part of the URL path. * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * @param blobName The blob name. * @return the updated BlobUrlParts object. */ /** * Gets the snapshot identifier that will be used as part of the query string if set. * * @return the snapshot identifier. */ public String getSnapshot() { return snapshot; } /** * Sets the snapshot identifier that will be used as part of the query string if set. * * @param snapshot The snapshot identifier. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSnapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Gets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @return the {@link BlobServiceSasQueryParameters} of the URL */ public BlobServiceSasQueryParameters getSasQueryParameters() { return blobServiceSasQueryParameters; } /** * Sets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @param blobServiceSasQueryParameters The SAS query parameters. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSasQueryParameters(BlobServiceSasQueryParameters blobServiceSasQueryParameters) { this.blobServiceSasQueryParameters = blobServiceSasQueryParameters; return this; } /** * Gets the query string parameters that aren't part of the SAS token that will be used by this URL. 
* * @return the non-SAS token query string values. */ public Map<String, String[]> getUnparsedParameters() { return unparsedParameters; } /** * Sets the query string parameters that aren't part of the SAS token that will be used by this URL. * * @param unparsedParameters The non-SAS token query string values. * @return the updated BlobUrlParts object. */ public BlobUrlParts setUnparsedParameters(Map<String, String[]> unparsedParameters) { this.unparsedParameters = unparsedParameters; return this; } /** * Converts the blob URL parts to a {@link URL}. * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * @return A {@code URL} to the blob resource composed of all the elements in this object. * @throws IllegalStateException The fields present on the BlobUrlParts object were insufficient to construct a * valid URL or were ill-formatted. */ public URL toUrl() { UrlBuilder url = new UrlBuilder().setScheme(this.scheme).setHost(this.host); StringBuilder path = new StringBuilder(); if ((this.containerName == null || this.containerName.isEmpty()) && this.blobName != null) { this.containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } if (this.containerName != null) { path.append(this.containerName); if (this.blobName != null) { path.append('/'); path.append(this.blobName); } } url.setPath(path.toString()); if (this.snapshot != null) { url.setQueryParameter(Constants.UrlConstants.SNAPSHOT_QUERY_PARAMETER, this.snapshot); } if (this.blobServiceSasQueryParameters != null) { String encodedSAS = this.blobServiceSasQueryParameters.encode(); if (encodedSAS.length() != 0) { url.setQuery(encodedSAS); } } for (Map.Entry<String, String[]> entry : this.unparsedParameters.entrySet()) { url.setQueryParameter(entry.getKey(), Utility.urlEncode(String.join(",", entry.getValue()))); } try { return url.toURL(); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalStateException("The URL parts created a malformed URL.")); } 
} /** * Parses a string into a BlobUrlParts. * * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. * @throws IllegalArgumentException If {@code url} is a malformed {@link URL}. */ public static BlobUrlParts parse(String url) { try { return parse(new URL(url)); } catch (MalformedURLException e) { throw new IllegalArgumentException("Invalid URL format. URL: " + url); } } /** * Parses an existing URL into a BlobUrlParts. * * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. 
*/ public static BlobUrlParts parse(URL url) { BlobUrlParts parts = new BlobUrlParts().setScheme(url.getProtocol()); if (IP_V4_URL_PATTERN.matcher(url.getHost()).find()) { parseIpUrl(url, parts); } else { parseNonIpUrl(url, parts); } Map<String, String[]> queryParamsMap = parseQueryString(url.getQuery()); String[] snapshotArray = queryParamsMap.remove("snapshot"); if (snapshotArray != null) { parts.setSnapshot(snapshotArray[0]); } BlobServiceSasQueryParameters blobServiceSasQueryParameters = new BlobServiceSasQueryParameters(queryParamsMap, true); return parts.setSasQueryParameters(blobServiceSasQueryParameters) .setUnparsedParameters(queryParamsMap); } /* * Parse the IP url into its host, account name, container name, and blob name. */ private static void parseIpUrl(URL url, BlobUrlParts parts) { parts.setHost(url.getAuthority()); String path = url.getPath(); if (path.charAt(0) == '/') { path = path.substring(1); } String[] pathPieces = path.split("/", 3); parts.setAccountName(pathPieces[0]); if (pathPieces.length >= 3) { parts.setContainerName(pathPieces[1]); parts.setBlobName(pathPieces[2]); } else if (pathPieces.length == 2) { parts.setContainerName(pathPieces[1]); } } /* * Parse the non-IP url into its host, account name, container name, and blob name. 
*/ private static void parseNonIpUrl(URL url, BlobUrlParts parts) { String host = url.getHost(); parts.setHost(host); if (!ImplUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { parts.setAccountName(host); } else { parts.setAccountName(host.substring(0, accountNameIndex)); } } String path = url.getPath(); if (!ImplUtils.isNullOrEmpty(path)) { if (path.charAt(0) == '/') { path = path.substring(1); } int containerEndIndex = path.indexOf('/'); if (containerEndIndex == -1) { parts.setContainerName(path); } else { parts.setContainerName(path.substring(0, containerEndIndex)); parts.setBlobName(path.substring(containerEndIndex + 1)); } } } /** * Parses a query string into a one to many hashmap. * * @param queryParams The string of query params to parse. * @return A {@code HashMap<String, String[]>} of the key values. */ private static TreeMap<String, String[]> parseQueryString(String queryParams) { final TreeMap<String, String[]> retVals = new TreeMap<>(Comparator.naturalOrder()); if (ImplUtils.isNullOrEmpty(queryParams)) { return retVals; } final String[] valuePairs = queryParams.split("&"); for (String valuePair : valuePairs) { final int equalDex = valuePair.indexOf("="); String key = Utility.urlDecode(valuePair.substring(0, equalDex)).toLowerCase(Locale.ROOT); String value = Utility.urlDecode(valuePair.substring(equalDex + 1)); String[] keyValues = retVals.get(key); if (keyValues == null) { keyValues = new String[]{value}; } else { final String[] newValues = new String[keyValues.length + 1]; System.arraycopy(keyValues, 0, newValues, 0, keyValues.length); newValues[newValues.length - 1] = value; keyValues = newValues; } retVals.put(key, keyValues); } return retVals; } }
class BlobUrlParts { private static final Pattern IP_V4_URL_PATTERN = Pattern .compile("(?:\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})|(?:localhost)"); private final ClientLogger logger = new ClientLogger(BlobUrlParts.class); private String scheme; private String host; private String containerName; private String blobName; private String snapshot; private String accountName; private BlobServiceSasQueryParameters blobServiceSasQueryParameters; private Map<String, String[]> unparsedParameters; /** * Initializes a BlobUrlParts object which helps aid in the construction of a Blob Storage URL. */ public BlobUrlParts() { unparsedParameters = new HashMap<>(); } /** * Gets the accountname, ex. "myaccountname". * * @return the account name. */ public String getAccountName() { return accountName; } /** * Sets the account name. * * @param accountName The account name. * @return the updated BlobURLParts object. */ public BlobUrlParts setAccountName(String accountName) { this.accountName = accountName; return this; } /** * Gets the URL scheme, ex. "https: * * @return the URL scheme. */ public String getScheme() { return scheme; } /** * Sets the URL scheme, ex. "https: * * @param scheme The URL scheme. * @return the updated BlobUrlParts object. */ public BlobUrlParts setScheme(String scheme) { this.scheme = scheme; return this; } /** * Gets the URL host, ex. "account.blob.core.windows.net". * * @return the URL host. */ public String getHost() { return host; } /** * Sets the URL host, ex. "account.blob.core.windows.net". * * @param host The URL host. * @return the updated BlobUrlParts object. */ public BlobUrlParts setHost(String host) { this.host = host; return this; } /** * Gets the container name that will be used as part of the URL path. * * @return the container name. */ public String getBlobContainerName() { return containerName; } /** * Sets the container name that will be used as part of the URL path. * * @param containerName The container nme. 
* @return the updated BlobUrlParts object. */ public BlobUrlParts setContainerName(String containerName) { this.containerName = containerName; return this; } /** * Decodes and gets the blob name that will be used as part of the URL path. * * @return the decoded blob name. */ public String getBlobName() { return (blobName == null) ? null : Utility.urlDecode(blobName); } /** * Sets the blob name that will be used as part of the URL path. * * @param blobName The blob name. * @return the updated BlobUrlParts object. */ /** * Gets the snapshot identifier that will be used as part of the query string if set. * * @return the snapshot identifier. */ public String getSnapshot() { return snapshot; } /** * Sets the snapshot identifier that will be used as part of the query string if set. * * @param snapshot The snapshot identifier. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSnapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Gets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @return the {@link BlobServiceSasQueryParameters} of the URL */ public BlobServiceSasQueryParameters getSasQueryParameters() { return blobServiceSasQueryParameters; } /** * Sets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @param blobServiceSasQueryParameters The SAS query parameters. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSasQueryParameters(BlobServiceSasQueryParameters blobServiceSasQueryParameters) { this.blobServiceSasQueryParameters = blobServiceSasQueryParameters; return this; } /** * Gets the query string parameters that aren't part of the SAS token that will be used by this URL. * * @return the non-SAS token query string values. 
*/ public Map<String, String[]> getUnparsedParameters() { return unparsedParameters; } /** * Sets the query string parameters that aren't part of the SAS token that will be used by this URL. * * @param unparsedParameters The non-SAS token query string values. * @return the updated BlobUrlParts object. */ public BlobUrlParts setUnparsedParameters(Map<String, String[]> unparsedParameters) { this.unparsedParameters = unparsedParameters; return this; } /** * Converts the blob URL parts to a {@link URL}. * * @return A {@code URL} to the blob resource composed of all the elements in this object. * @throws IllegalStateException The fields present on the BlobUrlParts object were insufficient to construct a * valid URL or were ill-formatted. */ public URL toUrl() { UrlBuilder url = new UrlBuilder().setScheme(this.scheme).setHost(this.host); StringBuilder path = new StringBuilder(); if ((this.containerName == null || this.containerName.isEmpty()) && this.blobName != null) { this.containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } if (this.containerName != null) { path.append(this.containerName); if (this.blobName != null) { path.append('/'); path.append(this.blobName); } } url.setPath(path.toString()); if (this.snapshot != null) { url.setQueryParameter(Constants.UrlConstants.SNAPSHOT_QUERY_PARAMETER, this.snapshot); } if (this.blobServiceSasQueryParameters != null) { String encodedSAS = this.blobServiceSasQueryParameters.encode(); if (encodedSAS.length() != 0) { url.setQuery(encodedSAS); } } for (Map.Entry<String, String[]> entry : this.unparsedParameters.entrySet()) { url.setQueryParameter(entry.getKey(), Utility.urlEncode(String.join(",", entry.getValue()))); } try { return url.toURL(); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalStateException("The URL parts created a malformed URL.", ex)); } } /** * Parses a string into a BlobUrlParts. 
* * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. * @throws IllegalArgumentException If {@code url} is a malformed {@link URL}. */ public static BlobUrlParts parse(String url) { try { return parse(new URL(url)); } catch (MalformedURLException e) { throw new IllegalArgumentException("Invalid URL format. URL: " + url); } } /** * Parses an existing URL into a BlobUrlParts. * * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. 
*/ public static BlobUrlParts parse(URL url) { BlobUrlParts parts = new BlobUrlParts().setScheme(url.getProtocol()); if (IP_V4_URL_PATTERN.matcher(url.getHost()).find()) { parseIpUrl(url, parts); } else { parseNonIpUrl(url, parts); } Map<String, String[]> queryParamsMap = parseQueryString(url.getQuery()); String[] snapshotArray = queryParamsMap.remove("snapshot"); if (snapshotArray != null) { parts.setSnapshot(snapshotArray[0]); } BlobServiceSasQueryParameters blobServiceSasQueryParameters = new BlobServiceSasQueryParameters(queryParamsMap, true); return parts.setSasQueryParameters(blobServiceSasQueryParameters) .setUnparsedParameters(queryParamsMap); } /* * Parse the IP url into its host, account name, container name, and blob name. */ private static void parseIpUrl(URL url, BlobUrlParts parts) { parts.setHost(url.getAuthority()); String path = url.getPath(); if (path.charAt(0) == '/') { path = path.substring(1); } String[] pathPieces = path.split("/", 3); parts.setAccountName(pathPieces[0]); if (pathPieces.length >= 3) { parts.setContainerName(pathPieces[1]); parts.setBlobName(pathPieces[2]); } else if (pathPieces.length == 2) { parts.setContainerName(pathPieces[1]); } } /* * Parse the non-IP url into its host, account name, container name, and blob name. 
*/ private static void parseNonIpUrl(URL url, BlobUrlParts parts) { String host = url.getHost(); parts.setHost(host); if (!ImplUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { parts.setAccountName(host); } else { parts.setAccountName(host.substring(0, accountNameIndex)); } } String path = url.getPath(); if (!ImplUtils.isNullOrEmpty(path)) { if (path.charAt(0) == '/') { path = path.substring(1); } int containerEndIndex = path.indexOf('/'); if (containerEndIndex == -1) { parts.setContainerName(path); } else { parts.setContainerName(path.substring(0, containerEndIndex)); parts.setBlobName(path.substring(containerEndIndex + 1)); } } } /** * Parses a query string into a one to many hashmap. * * @param queryParams The string of query params to parse. * @return A {@code HashMap<String, String[]>} of the key values. */ private static TreeMap<String, String[]> parseQueryString(String queryParams) { final TreeMap<String, String[]> retVals = new TreeMap<>(Comparator.naturalOrder()); if (ImplUtils.isNullOrEmpty(queryParams)) { return retVals; } final String[] valuePairs = queryParams.split("&"); for (String valuePair : valuePairs) { final int equalDex = valuePair.indexOf("="); String key = Utility.urlDecode(valuePair.substring(0, equalDex)).toLowerCase(Locale.ROOT); String value = Utility.urlDecode(valuePair.substring(equalDex + 1)); String[] keyValues = retVals.get(key); if (keyValues == null) { keyValues = new String[]{value}; } else { final String[] newValues = new String[keyValues.length + 1]; System.arraycopy(keyValues, 0, newValues, 0, keyValues.length); newValues[newValues.length - 1] = value; keyValues = newValues; } retVals.put(key, keyValues); } return retVals; } }
Agreed — this method should delegate to the overload that takes delete options and request conditions, so the path construction and encoding logic exists in only one place.
/**
 * Adds a delete blob operation to the batch.
 *
 * @param containerName The container of the blob.
 * @param blobName The name of the blob.
 * @return a {@link Response} that will be used to associate this operation to the response when the batch is
 * submitted.
 * @throws UnsupportedOperationException If this batch has already added an operation of another type.
 */
public Response<Void> deleteBlob(String containerName, String blobName) {
    // Delegate to the full overload so every (containerName, blobName) delete goes through a
    // single code path instead of duplicating the path-template/encoding logic here.
    return deleteBlob(containerName, blobName, null, null);
}
return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName,
/**
 * Adds a delete blob operation to the batch.
 *
 * @param containerName The container of the blob.
 * @param blobName The name of the blob.
 * @return a {@link Response} that will be used to associate this operation to the response when the batch is
 * submitted.
 * @throws UnsupportedOperationException If this batch has already added an operation of another type.
 */
public Response<Void> deleteBlob(String containerName, String blobName) {
    // Decode then re-encode so an already percent-encoded blob name isn't double-encoded.
    String encodedBlobName = Utility.urlEncode(Utility.urlDecode(blobName));
    String urlPath = String.format(PATH_TEMPLATE, containerName, encodedBlobName);
    return deleteBlobHelper(urlPath, null, null);
}
class BlobBatch { private static final String X_MS_VERSION = "x-ms-version"; private static final String BATCH_REQUEST_CONTENT_ID = "Batch-Request-Content-Id"; private static final String BATCH_REQUEST_URL_PATH = "Batch-Request-Url-Path"; private static final String CONTENT_ID = "Content-Id"; private static final String BATCH_BOUNDARY_TEMPLATE = "batch_%s"; private static final String REQUEST_CONTENT_TYPE_TEMPLATE = "multipart/mixed; boundary=%s"; private static final String BATCH_OPERATION_CONTENT_TYPE = "Content-Type: application/http"; private static final String BATCH_OPERATION_CONTENT_TRANSFER_ENCODING = "Content-Transfer-Encoding: binary"; private static final String BATCH_OPERATION_CONTENT_ID_TEMPLATE = "Content-ID: %d"; private static final String HTTP_VERSION = "HTTP/1.1"; private static final String OPERATION_TEMPLATE = "%s %s %s"; private static final String HEADER_TEMPLATE = "%s: %s"; private static final String PATH_TEMPLATE = "%s/%s"; /* * Track the status codes expected for the batching operations here as the batch body does not get parsed in * Azure Core where this information is maintained. */ private static final int[] EXPECTED_DELETE_STATUS_CODES = {202}; private static final int[] EXPECTED_SET_TIER_STATUS_CODES = {200, 202}; private final ClientLogger logger = new ClientLogger(BlobBatch.class); private final BlobAsyncClient blobAsyncClient; private final Deque<Mono<? 
extends Response<?>>> batchOperationQueue; private final List<ByteBuffer> batchRequest; private final Map<Integer, BlobBatchOperationResponse<?>> batchMapping; private final AtomicInteger contentId; private final String batchBoundary; private final String contentType; private BlobBatchType batchType; BlobBatch(String accountUrl, HttpPipeline pipeline) { this.contentId = new AtomicInteger(); this.batchBoundary = String.format(BATCH_BOUNDARY_TEMPLATE, UUID.randomUUID()); this.contentType = String.format(REQUEST_CONTENT_TYPE_TEMPLATE, batchBoundary); boolean batchHeadersPolicySet = false; HttpPipelineBuilder batchPipelineBuilder = new HttpPipelineBuilder().httpClient(this::setupBatchOperation); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy policy = pipeline.getPolicy(i); if (policy instanceof StorageSharedKeyCredentialPolicy) { batchHeadersPolicySet = true; batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } batchPipelineBuilder.policies(policy); } if (!batchHeadersPolicySet) { batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } this.blobAsyncClient = new BlobClientBuilder() .endpoint(accountUrl) .blobName("") .pipeline(batchPipelineBuilder.build()) .buildAsyncClient(); this.batchOperationQueue = new ConcurrentLinkedDeque<>(); this.batchRequest = new ArrayList<>(); this.batchMapping = new ConcurrentHashMap<>(); } /** * Adds a delete blob operation to the batch. * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. 
*/ /** * Adds a delete blob operation to the batch. * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String containerName, String blobName, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), deleteOptions, blobRequestConditions); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String blobUrl) { return deleteBlobHelper(getUrlPath(blobUrl), null, null); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. * @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. 
* @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String blobUrl, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(getUrlPath(blobUrl), deleteOptions, blobRequestConditions); } private Response<Void> deleteBlobHelper(String urlPath, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { setBatchType(BlobBatchType.DELETE); return createBatchOperation(blobAsyncClient.deleteWithResponse(deleteOptions, blobRequestConditions), urlPath, EXPECTED_DELETE_STATUS_CODES); } /** * Adds a set tier operation to the batch. * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, null); } /** * Adds a set tier operation to the batch. * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. 
* @param accessTier The tier to set on the blob. * @param leaseId The lease ID the active lease on the blob must match. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, leaseId); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, null); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. * @param accessTier The tier to set on the blob. * @param leaseId The lease ID the active lease on the blob must match. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. 
*/ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, leaseId); } private Response<Void> setBlobAccessTierHelper(String urlPath, AccessTier accessTier, String leaseId) { setBatchType(BlobBatchType.SET_TIER); return createBatchOperation(blobAsyncClient.setAccessTierWithResponse(accessTier, null, leaseId), urlPath, EXPECTED_SET_TIER_STATUS_CODES); } private <T> Response<T> createBatchOperation(Mono<Response<T>> response, String urlPath, int... expectedStatusCodes) { int id = contentId.getAndIncrement(); batchOperationQueue.add(response .subscriberContext(Context.of(BATCH_REQUEST_CONTENT_ID, id, BATCH_REQUEST_URL_PATH, urlPath))); BlobBatchOperationResponse<T> batchOperationResponse = new BlobBatchOperationResponse<>(expectedStatusCodes); batchMapping.put(id, batchOperationResponse); return batchOperationResponse; } private String getUrlPath(String url) { return UrlBuilder.parse(url).getPath(); } private void setBatchType(BlobBatchType batchType) { if (this.batchType == null) { this.batchType = batchType; } else if (this.batchType != batchType) { throw logger.logExceptionAsError(new UnsupportedOperationException(String.format(Locale.ROOT, "'BlobBatch' only supports homogeneous operations and is a %s batch.", this.batchType))); } } Flux<ByteBuffer> getBody() { if (batchOperationQueue.isEmpty()) { throw logger.logExceptionAsError(new UnsupportedOperationException("Empty batch requests aren't allowed.")); } Disposable disposable = Flux.fromStream(batchOperationQueue.stream()) .flatMap(batchOperation -> batchOperation) .subscribe(); /* Wait until the 'Flux' is disposed of (aka complete) instead of blocking as this will prevent Reactor from * throwing an exception if this was ran in a Reactor thread. 
*/ while (!disposable.isDisposed()) { } this.batchRequest.add(ByteBuffer.wrap( String.format("--%s--%s", batchBoundary, BlobBatchHelper.HTTP_NEWLINE).getBytes(StandardCharsets.UTF_8))); return Flux.fromIterable(batchRequest); } long getContentLength() { long contentLength = 0; for (ByteBuffer request : batchRequest) { contentLength += request.remaining(); } return contentLength; } String getContentType() { return contentType; } BlobBatchOperationResponse<?> getBatchRequest(int contentId) { return batchMapping.get(contentId); } int getOperationCount() { return batchMapping.size(); } /* * This performs a cleanup operation that would be handled when the request is sent through Netty or OkHttp. * Additionally, it removes the "x-ms-version" header from the request as batch operation requests cannot have this * and it adds the header "Content-Id" that allows the request to be mapped to the response. */ private Mono<HttpResponse> cleanseHeaders(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { context.getHttpRequest().getHeaders().remove(X_MS_VERSION); Map<String, String> headers = context.getHttpRequest().getHeaders().toMap(); headers.entrySet().removeIf(header -> header.getValue() == null); context.getHttpRequest().setHeaders(new HttpHeaders(headers)); context.getHttpRequest().setHeader(CONTENT_ID, context.getData(BATCH_REQUEST_CONTENT_ID).get().toString()); return next.process(); } /* * This performs changing the request URL to the value passed through the pipeline context. This policy is used in * place of constructing a new client for each batch request that is being sent. 
*/ private Mono<HttpResponse> setRequestUrl(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { try { UrlBuilder requestUrl = UrlBuilder.parse(context.getHttpRequest().getUrl()); requestUrl.setPath(context.getData(BATCH_REQUEST_URL_PATH).get().toString()); context.getHttpRequest().setUrl(requestUrl.toURL()); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new IllegalStateException(ex))); } return next.process(); } /* * This will "send" the batch operation request when triggered, it simply acts as a way to build and write the * batch operation into the overall request and then returns nothing as the response. */ private Mono<HttpResponse> setupBatchOperation(HttpRequest request) { return Mono.fromRunnable(() -> { int contentId = Integer.parseInt(request.getHeaders().remove(CONTENT_ID).getValue()); StringBuilder batchRequestBuilder = new StringBuilder(); appendWithNewline(batchRequestBuilder, "--" + batchBoundary); appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TYPE); appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TRANSFER_ENCODING); appendWithNewline(batchRequestBuilder, String.format(BATCH_OPERATION_CONTENT_ID_TEMPLATE, contentId)); batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE); String method = request.getHttpMethod().toString(); String urlPath = request.getUrl().getPath(); String urlQuery = request.getUrl().getQuery(); if (!ImplUtils.isNullOrEmpty(urlQuery)) { urlPath = urlPath + "?" 
+ urlQuery; } appendWithNewline(batchRequestBuilder, String.format(OPERATION_TEMPLATE, method, urlPath, HTTP_VERSION)); request.getHeaders().stream() .filter(header -> !X_MS_VERSION.equalsIgnoreCase(header.getName())) .forEach(header -> appendWithNewline(batchRequestBuilder, String.format(HEADER_TEMPLATE, header.getName(), header.getValue()))); batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE); batchRequest.add(ByteBuffer.wrap(batchRequestBuilder.toString().getBytes(StandardCharsets.UTF_8))); batchMapping.get(contentId).setRequest(request); }); } private void appendWithNewline(StringBuilder stringBuilder, String value) { stringBuilder.append(value).append(BlobBatchHelper.HTTP_NEWLINE); } }
class BlobBatch { private static final String X_MS_VERSION = "x-ms-version"; private static final String BATCH_REQUEST_CONTENT_ID = "Batch-Request-Content-Id"; private static final String BATCH_REQUEST_URL_PATH = "Batch-Request-Url-Path"; private static final String CONTENT_ID = "Content-Id"; private static final String BATCH_BOUNDARY_TEMPLATE = "batch_%s"; private static final String REQUEST_CONTENT_TYPE_TEMPLATE = "multipart/mixed; boundary=%s"; private static final String BATCH_OPERATION_CONTENT_TYPE = "Content-Type: application/http"; private static final String BATCH_OPERATION_CONTENT_TRANSFER_ENCODING = "Content-Transfer-Encoding: binary"; private static final String BATCH_OPERATION_CONTENT_ID_TEMPLATE = "Content-ID: %d"; private static final String HTTP_VERSION = "HTTP/1.1"; private static final String OPERATION_TEMPLATE = "%s %s %s"; private static final String HEADER_TEMPLATE = "%s: %s"; private static final String PATH_TEMPLATE = "%s/%s"; /* * Track the status codes expected for the batching operations here as the batch body does not get parsed in * Azure Core where this information is maintained. */ private static final int[] EXPECTED_DELETE_STATUS_CODES = {202}; private static final int[] EXPECTED_SET_TIER_STATUS_CODES = {200, 202}; private final ClientLogger logger = new ClientLogger(BlobBatch.class); private final BlobAsyncClient blobAsyncClient; private final Deque<Mono<? 
extends Response<?>>> batchOperationQueue; private final List<ByteBuffer> batchRequest; private final Map<Integer, BlobBatchOperationResponse<?>> batchMapping; private final AtomicInteger contentId; private final String batchBoundary; private final String contentType; private BlobBatchType batchType; BlobBatch(String accountUrl, HttpPipeline pipeline) { this.contentId = new AtomicInteger(); this.batchBoundary = String.format(BATCH_BOUNDARY_TEMPLATE, UUID.randomUUID()); this.contentType = String.format(REQUEST_CONTENT_TYPE_TEMPLATE, batchBoundary); boolean batchHeadersPolicySet = false; HttpPipelineBuilder batchPipelineBuilder = new HttpPipelineBuilder().httpClient(this::setupBatchOperation); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy policy = pipeline.getPolicy(i); if (policy instanceof StorageSharedKeyCredentialPolicy) { batchHeadersPolicySet = true; batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } batchPipelineBuilder.policies(policy); } if (!batchHeadersPolicySet) { batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } this.blobAsyncClient = new BlobClientBuilder() .endpoint(accountUrl) .blobName("") .pipeline(batchPipelineBuilder.build()) .buildAsyncClient(); this.batchOperationQueue = new ConcurrentLinkedDeque<>(); this.batchRequest = new ArrayList<>(); this.batchMapping = new ConcurrentHashMap<>(); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ /** * Adds a delete blob operation to the batch. 
* * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String containerName, String blobName, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), deleteOptions, blobRequestConditions); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String blobUrl) { return deleteBlobHelper(getUrlPath(blobUrl), null, null); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. 
* @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String blobUrl, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(getUrlPath(blobUrl), deleteOptions, blobRequestConditions); } private Response<Void> deleteBlobHelper(String urlPath, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { setBatchType(BlobBatchType.DELETE); return createBatchOperation(blobAsyncClient.deleteWithResponse(deleteOptions, blobRequestConditions), urlPath, EXPECTED_DELETE_STATUS_CODES); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, null); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param accessTier The tier to set on the blob. * @param leaseId The lease ID the active lease on the blob must match. 
* @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, leaseId); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, null); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param accessTier The tier to set on the blob. * @param leaseId The lease ID the active lease on the blob must match. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. 
*/ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, leaseId); } private Response<Void> setBlobAccessTierHelper(String urlPath, AccessTier accessTier, String leaseId) { setBatchType(BlobBatchType.SET_TIER); return createBatchOperation(blobAsyncClient.setAccessTierWithResponse(accessTier, null, leaseId), urlPath, EXPECTED_SET_TIER_STATUS_CODES); } private <T> Response<T> createBatchOperation(Mono<Response<T>> response, String urlPath, int... expectedStatusCodes) { int id = contentId.getAndIncrement(); batchOperationQueue.add(response .subscriberContext(Context.of(BATCH_REQUEST_CONTENT_ID, id, BATCH_REQUEST_URL_PATH, urlPath))); BlobBatchOperationResponse<T> batchOperationResponse = new BlobBatchOperationResponse<>(expectedStatusCodes); batchMapping.put(id, batchOperationResponse); return batchOperationResponse; } private String getUrlPath(String url) { return UrlBuilder.parse(url).getPath(); } private void setBatchType(BlobBatchType batchType) { if (this.batchType == null) { this.batchType = batchType; } else if (this.batchType != batchType) { throw logger.logExceptionAsError(new UnsupportedOperationException(String.format(Locale.ROOT, "'BlobBatch' only supports homogeneous operations and is a %s batch.", this.batchType))); } } Flux<ByteBuffer> getBody() { if (batchOperationQueue.isEmpty()) { throw logger.logExceptionAsError(new UnsupportedOperationException("Empty batch requests aren't allowed.")); } Disposable disposable = Flux.fromStream(batchOperationQueue.stream()) .flatMap(batchOperation -> batchOperation) .subscribe(); /* Wait until the 'Flux' is disposed of (aka complete) instead of blocking as this will prevent Reactor from * throwing an exception if this was ran in a Reactor thread. 
*/ while (!disposable.isDisposed()) { } this.batchRequest.add(ByteBuffer.wrap( String.format("--%s--%s", batchBoundary, BlobBatchHelper.HTTP_NEWLINE).getBytes(StandardCharsets.UTF_8))); return Flux.fromIterable(batchRequest); } long getContentLength() { long contentLength = 0; for (ByteBuffer request : batchRequest) { contentLength += request.remaining(); } return contentLength; } String getContentType() { return contentType; } BlobBatchOperationResponse<?> getBatchRequest(int contentId) { return batchMapping.get(contentId); } int getOperationCount() { return batchMapping.size(); } /* * This performs a cleanup operation that would be handled when the request is sent through Netty or OkHttp. * Additionally, it removes the "x-ms-version" header from the request as batch operation requests cannot have this * and it adds the header "Content-Id" that allows the request to be mapped to the response. */ private Mono<HttpResponse> cleanseHeaders(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { context.getHttpRequest().getHeaders().remove(X_MS_VERSION); Map<String, String> headers = context.getHttpRequest().getHeaders().toMap(); headers.entrySet().removeIf(header -> header.getValue() == null); context.getHttpRequest().setHeaders(new HttpHeaders(headers)); context.getHttpRequest().setHeader(CONTENT_ID, context.getData(BATCH_REQUEST_CONTENT_ID).get().toString()); return next.process(); } /* * This performs changing the request URL to the value passed through the pipeline context. This policy is used in * place of constructing a new client for each batch request that is being sent. 
*/ private Mono<HttpResponse> setRequestUrl(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { try { UrlBuilder requestUrl = UrlBuilder.parse(context.getHttpRequest().getUrl()); requestUrl.setPath(context.getData(BATCH_REQUEST_URL_PATH).get().toString()); context.getHttpRequest().setUrl(requestUrl.toURL()); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new IllegalStateException(ex))); } return next.process(); } /* * This will "send" the batch operation request when triggered, it simply acts as a way to build and write the * batch operation into the overall request and then returns nothing as the response. */ private Mono<HttpResponse> setupBatchOperation(HttpRequest request) { return Mono.fromRunnable(() -> { int contentId = Integer.parseInt(request.getHeaders().remove(CONTENT_ID).getValue()); StringBuilder batchRequestBuilder = new StringBuilder(); appendWithNewline(batchRequestBuilder, "--" + batchBoundary); appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TYPE); appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TRANSFER_ENCODING); appendWithNewline(batchRequestBuilder, String.format(BATCH_OPERATION_CONTENT_ID_TEMPLATE, contentId)); batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE); String method = request.getHttpMethod().toString(); String urlPath = request.getUrl().getPath(); String urlQuery = request.getUrl().getQuery(); if (!ImplUtils.isNullOrEmpty(urlQuery)) { urlPath = urlPath + "?" 
+ urlQuery; } appendWithNewline(batchRequestBuilder, String.format(OPERATION_TEMPLATE, method, urlPath, HTTP_VERSION)); request.getHeaders().stream() .filter(header -> !X_MS_VERSION.equalsIgnoreCase(header.getName())) .forEach(header -> appendWithNewline(batchRequestBuilder, String.format(HEADER_TEMPLATE, header.getName(), header.getValue()))); batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE); batchRequest.add(ByteBuffer.wrap(batchRequestBuilder.toString().getBytes(StandardCharsets.UTF_8))); batchMapping.get(contentId).setRequest(request); }); } private void appendWithNewline(StringBuilder stringBuilder, String value) { stringBuilder.append(value).append(BlobBatchHelper.HTTP_NEWLINE); } }
See answer about overloads and double encoding above.
/**
 * Sets the blob name that will be used as part of the URL path.
 *
 * @param blobName The blob name.
 * @return the updated BlobUrlParts object.
 */
public BlobUrlParts setBlobName(String blobName) {
    // Decode first so an already percent-encoded name isn't double-encoded, then re-encode
    // so the stored name is always URL-safe regardless of the caller's input form.
    this.blobName = Utility.urlEncode(Utility.urlDecode(blobName));
    return this;
}
this.blobName = Utility.urlEncode(Utility.urlDecode(blobName));
/**
 * Sets the blob name that will be used as part of the URL path.
 *
 * @param blobName The blob name.
 * @return the updated BlobUrlParts object.
 */
public BlobUrlParts setBlobName(String blobName) {
    // Normalizing via decode-then-encode keeps the operation idempotent for
    // names that arrive already percent-encoded.
    String decoded = Utility.urlDecode(blobName);
    this.blobName = Utility.urlEncode(decoded);
    return this;
}
class BlobUrlParts { private static final Pattern IP_V4_URL_PATTERN = Pattern .compile("(?:\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})|(?:localhost)"); private final ClientLogger logger = new ClientLogger(BlobUrlParts.class); private String scheme; private String host; private String containerName; private String blobName; private String snapshot; private String accountName; private BlobServiceSasQueryParameters blobServiceSasQueryParameters; private Map<String, String[]> unparsedParameters; /** * Initializes a BlobUrlParts object which helps aid in the construction of a Blob Storage URL. */ public BlobUrlParts() { unparsedParameters = new HashMap<>(); } /** * Gets the accountname, ex. "myaccountname". * * @return the account name. */ public String getAccountName() { return accountName; } /** * Sets the account name. * * @param accountName The account name. * @return the updated BlobURLParts object. */ public BlobUrlParts setAccountName(String accountName) { this.accountName = accountName; return this; } /** * Gets the URL scheme, ex. "https: * * @return the URL scheme. */ public String getScheme() { return scheme; } /** * Sets the URL scheme, ex. "https: * * @param scheme The URL scheme. * @return the updated BlobUrlParts object. */ public BlobUrlParts setScheme(String scheme) { this.scheme = scheme; return this; } /** * Gets the URL host, ex. "account.blob.core.windows.net". * * @return the URL host. */ public String getHost() { return host; } /** * Sets the URL host, ex. "account.blob.core.windows.net". * * @param host The URL host. * @return the updated BlobUrlParts object. */ public BlobUrlParts setHost(String host) { this.host = host; return this; } /** * Gets the container name that will be used as part of the URL path. * * @return the container name. */ public String getBlobContainerName() { return containerName; } /** * Sets the container name that will be used as part of the URL path. * * @param containerName The container nme. 
* @return the updated BlobUrlParts object. */ public BlobUrlParts setContainerName(String containerName) { this.containerName = containerName; return this; } /** * Gets the blob name that will be used as part of the URL path. * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * @return the blob name. */ public String getBlobName() { return blobName; } /** * Sets the blob name that will be used as part of the URL path. * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * @param blobName The blob name. * @return the updated BlobUrlParts object. */ /** * Gets the snapshot identifier that will be used as part of the query string if set. * * @return the snapshot identifier. */ public String getSnapshot() { return snapshot; } /** * Sets the snapshot identifier that will be used as part of the query string if set. * * @param snapshot The snapshot identifier. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSnapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Gets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @return the {@link BlobServiceSasQueryParameters} of the URL */ public BlobServiceSasQueryParameters getSasQueryParameters() { return blobServiceSasQueryParameters; } /** * Sets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @param blobServiceSasQueryParameters The SAS query parameters. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSasQueryParameters(BlobServiceSasQueryParameters blobServiceSasQueryParameters) { this.blobServiceSasQueryParameters = blobServiceSasQueryParameters; return this; } /** * Gets the query string parameters that aren't part of the SAS token that will be used by this URL. 
* * @return the non-SAS token query string values. */ public Map<String, String[]> getUnparsedParameters() { return unparsedParameters; } /** * Sets the query string parameters that aren't part of the SAS token that will be used by this URL. * * @param unparsedParameters The non-SAS token query string values. * @return the updated BlobUrlParts object. */ public BlobUrlParts setUnparsedParameters(Map<String, String[]> unparsedParameters) { this.unparsedParameters = unparsedParameters; return this; } /** * Converts the blob URL parts to a {@link URL}. * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * @return A {@code URL} to the blob resource composed of all the elements in this object. * @throws IllegalStateException The fields present on the BlobUrlParts object were insufficient to construct a * valid URL or were ill-formatted. */ public URL toUrl() { UrlBuilder url = new UrlBuilder().setScheme(this.scheme).setHost(this.host); StringBuilder path = new StringBuilder(); if ((this.containerName == null || this.containerName.isEmpty()) && this.blobName != null) { this.containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } if (this.containerName != null) { path.append(this.containerName); if (this.blobName != null) { path.append('/'); path.append(this.blobName); } } url.setPath(path.toString()); if (this.snapshot != null) { url.setQueryParameter(Constants.UrlConstants.SNAPSHOT_QUERY_PARAMETER, this.snapshot); } if (this.blobServiceSasQueryParameters != null) { String encodedSAS = this.blobServiceSasQueryParameters.encode(); if (encodedSAS.length() != 0) { url.setQuery(encodedSAS); } } for (Map.Entry<String, String[]> entry : this.unparsedParameters.entrySet()) { url.setQueryParameter(entry.getKey(), Utility.urlEncode(String.join(",", entry.getValue()))); } try { return url.toURL(); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalStateException("The URL parts created a malformed URL.")); } 
} /** * Parses a string into a BlobUrlParts. * * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. * @throws IllegalArgumentException If {@code url} is a malformed {@link URL}. */ public static BlobUrlParts parse(String url) { try { return parse(new URL(url)); } catch (MalformedURLException e) { throw new IllegalArgumentException("Invalid URL format. URL: " + url); } } /** * Parses an existing URL into a BlobUrlParts. * * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * <p>Blob name is encoded to UTF-8 using the {@link com.azure.storage.common.Utility * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. 
*/ public static BlobUrlParts parse(URL url) { BlobUrlParts parts = new BlobUrlParts().setScheme(url.getProtocol()); if (IP_V4_URL_PATTERN.matcher(url.getHost()).find()) { parseIpUrl(url, parts); } else { parseNonIpUrl(url, parts); } Map<String, String[]> queryParamsMap = parseQueryString(url.getQuery()); String[] snapshotArray = queryParamsMap.remove("snapshot"); if (snapshotArray != null) { parts.setSnapshot(snapshotArray[0]); } BlobServiceSasQueryParameters blobServiceSasQueryParameters = new BlobServiceSasQueryParameters(queryParamsMap, true); return parts.setSasQueryParameters(blobServiceSasQueryParameters) .setUnparsedParameters(queryParamsMap); } /* * Parse the IP url into its host, account name, container name, and blob name. */ private static void parseIpUrl(URL url, BlobUrlParts parts) { parts.setHost(url.getAuthority()); String path = url.getPath(); if (path.charAt(0) == '/') { path = path.substring(1); } String[] pathPieces = path.split("/", 3); parts.setAccountName(pathPieces[0]); if (pathPieces.length >= 3) { parts.setContainerName(pathPieces[1]); parts.setBlobName(pathPieces[2]); } else if (pathPieces.length == 2) { parts.setContainerName(pathPieces[1]); } } /* * Parse the non-IP url into its host, account name, container name, and blob name. 
*/ private static void parseNonIpUrl(URL url, BlobUrlParts parts) { String host = url.getHost(); parts.setHost(host); if (!ImplUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { parts.setAccountName(host); } else { parts.setAccountName(host.substring(0, accountNameIndex)); } } String path = url.getPath(); if (!ImplUtils.isNullOrEmpty(path)) { if (path.charAt(0) == '/') { path = path.substring(1); } int containerEndIndex = path.indexOf('/'); if (containerEndIndex == -1) { parts.setContainerName(path); } else { parts.setContainerName(path.substring(0, containerEndIndex)); parts.setBlobName(path.substring(containerEndIndex + 1)); } } } /** * Parses a query string into a one to many hashmap. * * @param queryParams The string of query params to parse. * @return A {@code HashMap<String, String[]>} of the key values. */ private static TreeMap<String, String[]> parseQueryString(String queryParams) { final TreeMap<String, String[]> retVals = new TreeMap<>(Comparator.naturalOrder()); if (ImplUtils.isNullOrEmpty(queryParams)) { return retVals; } final String[] valuePairs = queryParams.split("&"); for (String valuePair : valuePairs) { final int equalDex = valuePair.indexOf("="); String key = Utility.urlDecode(valuePair.substring(0, equalDex)).toLowerCase(Locale.ROOT); String value = Utility.urlDecode(valuePair.substring(equalDex + 1)); String[] keyValues = retVals.get(key); if (keyValues == null) { keyValues = new String[]{value}; } else { final String[] newValues = new String[keyValues.length + 1]; System.arraycopy(keyValues, 0, newValues, 0, keyValues.length); newValues[newValues.length - 1] = value; keyValues = newValues; } retVals.put(key, keyValues); } return retVals; } }
class BlobUrlParts { private static final Pattern IP_V4_URL_PATTERN = Pattern .compile("(?:\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})|(?:localhost)"); private final ClientLogger logger = new ClientLogger(BlobUrlParts.class); private String scheme; private String host; private String containerName; private String blobName; private String snapshot; private String accountName; private BlobServiceSasQueryParameters blobServiceSasQueryParameters; private Map<String, String[]> unparsedParameters; /** * Initializes a BlobUrlParts object which helps aid in the construction of a Blob Storage URL. */ public BlobUrlParts() { unparsedParameters = new HashMap<>(); } /** * Gets the accountname, ex. "myaccountname". * * @return the account name. */ public String getAccountName() { return accountName; } /** * Sets the account name. * * @param accountName The account name. * @return the updated BlobURLParts object. */ public BlobUrlParts setAccountName(String accountName) { this.accountName = accountName; return this; } /** * Gets the URL scheme, ex. "https: * * @return the URL scheme. */ public String getScheme() { return scheme; } /** * Sets the URL scheme, ex. "https: * * @param scheme The URL scheme. * @return the updated BlobUrlParts object. */ public BlobUrlParts setScheme(String scheme) { this.scheme = scheme; return this; } /** * Gets the URL host, ex. "account.blob.core.windows.net". * * @return the URL host. */ public String getHost() { return host; } /** * Sets the URL host, ex. "account.blob.core.windows.net". * * @param host The URL host. * @return the updated BlobUrlParts object. */ public BlobUrlParts setHost(String host) { this.host = host; return this; } /** * Gets the container name that will be used as part of the URL path. * * @return the container name. */ public String getBlobContainerName() { return containerName; } /** * Sets the container name that will be used as part of the URL path. * * @param containerName The container nme. 
* @return the updated BlobUrlParts object. */ public BlobUrlParts setContainerName(String containerName) { this.containerName = containerName; return this; } /** * Decodes and gets the blob name that will be used as part of the URL path. * * @return the decoded blob name. */ public String getBlobName() { return (blobName == null) ? null : Utility.urlDecode(blobName); } /** * Sets the blob name that will be used as part of the URL path. * * @param blobName The blob name. * @return the updated BlobUrlParts object. */ /** * Gets the snapshot identifier that will be used as part of the query string if set. * * @return the snapshot identifier. */ public String getSnapshot() { return snapshot; } /** * Sets the snapshot identifier that will be used as part of the query string if set. * * @param snapshot The snapshot identifier. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSnapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Gets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @return the {@link BlobServiceSasQueryParameters} of the URL */ public BlobServiceSasQueryParameters getSasQueryParameters() { return blobServiceSasQueryParameters; } /** * Sets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @param blobServiceSasQueryParameters The SAS query parameters. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSasQueryParameters(BlobServiceSasQueryParameters blobServiceSasQueryParameters) { this.blobServiceSasQueryParameters = blobServiceSasQueryParameters; return this; } /** * Gets the query string parameters that aren't part of the SAS token that will be used by this URL. * * @return the non-SAS token query string values. 
*/ public Map<String, String[]> getUnparsedParameters() { return unparsedParameters; } /** * Sets the query string parameters that aren't part of the SAS token that will be used by this URL. * * @param unparsedParameters The non-SAS token query string values. * @return the updated BlobUrlParts object. */ public BlobUrlParts setUnparsedParameters(Map<String, String[]> unparsedParameters) { this.unparsedParameters = unparsedParameters; return this; } /** * Converts the blob URL parts to a {@link URL}. * * @return A {@code URL} to the blob resource composed of all the elements in this object. * @throws IllegalStateException The fields present on the BlobUrlParts object were insufficient to construct a * valid URL or were ill-formatted. */ public URL toUrl() { UrlBuilder url = new UrlBuilder().setScheme(this.scheme).setHost(this.host); StringBuilder path = new StringBuilder(); if ((this.containerName == null || this.containerName.isEmpty()) && this.blobName != null) { this.containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } if (this.containerName != null) { path.append(this.containerName); if (this.blobName != null) { path.append('/'); path.append(this.blobName); } } url.setPath(path.toString()); if (this.snapshot != null) { url.setQueryParameter(Constants.UrlConstants.SNAPSHOT_QUERY_PARAMETER, this.snapshot); } if (this.blobServiceSasQueryParameters != null) { String encodedSAS = this.blobServiceSasQueryParameters.encode(); if (encodedSAS.length() != 0) { url.setQuery(encodedSAS); } } for (Map.Entry<String, String[]> entry : this.unparsedParameters.entrySet()) { url.setQueryParameter(entry.getKey(), Utility.urlEncode(String.join(",", entry.getValue()))); } try { return url.toURL(); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalStateException("The URL parts created a malformed URL.", ex)); } } /** * Parses a string into a BlobUrlParts. 
* * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. * @throws IllegalArgumentException If {@code url} is a malformed {@link URL}. */ public static BlobUrlParts parse(String url) { try { return parse(new URL(url)); } catch (MalformedURLException e) { throw new IllegalArgumentException("Invalid URL format. URL: " + url); } } /** * Parses an existing URL into a BlobUrlParts. * * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. 
*/ public static BlobUrlParts parse(URL url) { BlobUrlParts parts = new BlobUrlParts().setScheme(url.getProtocol()); if (IP_V4_URL_PATTERN.matcher(url.getHost()).find()) { parseIpUrl(url, parts); } else { parseNonIpUrl(url, parts); } Map<String, String[]> queryParamsMap = parseQueryString(url.getQuery()); String[] snapshotArray = queryParamsMap.remove("snapshot"); if (snapshotArray != null) { parts.setSnapshot(snapshotArray[0]); } BlobServiceSasQueryParameters blobServiceSasQueryParameters = new BlobServiceSasQueryParameters(queryParamsMap, true); return parts.setSasQueryParameters(blobServiceSasQueryParameters) .setUnparsedParameters(queryParamsMap); } /* * Parse the IP url into its host, account name, container name, and blob name. */ private static void parseIpUrl(URL url, BlobUrlParts parts) { parts.setHost(url.getAuthority()); String path = url.getPath(); if (path.charAt(0) == '/') { path = path.substring(1); } String[] pathPieces = path.split("/", 3); parts.setAccountName(pathPieces[0]); if (pathPieces.length >= 3) { parts.setContainerName(pathPieces[1]); parts.setBlobName(pathPieces[2]); } else if (pathPieces.length == 2) { parts.setContainerName(pathPieces[1]); } } /* * Parse the non-IP url into its host, account name, container name, and blob name. 
*/ private static void parseNonIpUrl(URL url, BlobUrlParts parts) { String host = url.getHost(); parts.setHost(host); if (!ImplUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { parts.setAccountName(host); } else { parts.setAccountName(host.substring(0, accountNameIndex)); } } String path = url.getPath(); if (!ImplUtils.isNullOrEmpty(path)) { if (path.charAt(0) == '/') { path = path.substring(1); } int containerEndIndex = path.indexOf('/'); if (containerEndIndex == -1) { parts.setContainerName(path); } else { parts.setContainerName(path.substring(0, containerEndIndex)); parts.setBlobName(path.substring(containerEndIndex + 1)); } } } /** * Parses a query string into a one to many hashmap. * * @param queryParams The string of query params to parse. * @return A {@code HashMap<String, String[]>} of the key values. */ private static TreeMap<String, String[]> parseQueryString(String queryParams) { final TreeMap<String, String[]> retVals = new TreeMap<>(Comparator.naturalOrder()); if (ImplUtils.isNullOrEmpty(queryParams)) { return retVals; } final String[] valuePairs = queryParams.split("&"); for (String valuePair : valuePairs) { final int equalDex = valuePair.indexOf("="); String key = Utility.urlDecode(valuePair.substring(0, equalDex)).toLowerCase(Locale.ROOT); String value = Utility.urlDecode(valuePair.substring(equalDex + 1)); String[] keyValues = retVals.get(key); if (keyValues == null) { keyValues = new String[]{value}; } else { final String[] newValues = new String[keyValues.length + 1]; System.arraycopy(keyValues, 0, newValues, 0, keyValues.length); newValues[newValues.length - 1] = value; keyValues = newValues; } retVals.put(key, keyValues); } return retVals; } }
We could possibly introduce a KVCredentialPolicy in the pipeline and pass the default Azure credential there instead of through the builder. This would showcase better use of a custom pipeline.
public CryptographyAsyncClient createAsyncClientWithPipeline() { HttpPipeline pipeline = new HttpPipelineBuilder().build(); CryptographyAsyncClient cryptographyAsyncClient = new CryptographyClientBuilder() .pipeline(pipeline) .keyIdentifier("<YOUR-KEY-ID") .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); return cryptographyAsyncClient; }
HttpPipeline pipeline = new HttpPipelineBuilder().build();
public CryptographyAsyncClient createAsyncClientWithPipeline() { HttpPipeline pipeline = new HttpPipelineBuilder().build(); CryptographyAsyncClient cryptographyAsyncClient = new CryptographyClientBuilder() .pipeline(pipeline) .keyIdentifier("<YOUR-KEY-ID") .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); return cryptographyAsyncClient; }
class CryptographyAsyncClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public CryptographyAsyncClient createAsyncClientWithHttpClient() { CryptographyAsyncClient cryptographyAsyncClient = new CryptographyClientBuilder() .keyIdentifier("<Your-Key-ID>") .credential(new DefaultAzureCredentialBuilder().build()) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .httpClient(HttpClient.createDefault()) .buildAsyncClient(); return cryptographyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public CryptographyAsyncClient createAsyncClient() { CryptographyAsyncClient cryptographyAsyncClient = new CryptographyClientBuilder() .keyIdentifier("<YOUR-KEY-ID>") .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); return cryptographyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ /** * Generates a code sample for using {@link CryptographyAsyncClient */ public void getKeyWithResponseSnippets() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); cryptographyAsyncClient.getKeyWithResponse() .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is returned with name %s and id %s \n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link CryptographyAsyncClient */ public void getKeySnippets() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); cryptographyAsyncClient.getKey() .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) 
.subscribe(key -> System.out.printf("Key is returned with name %s and id %s \n", key.getName(), key.getId())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient */ public void encrypt() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] iv = {(byte) 0x1a, (byte) 0xf3, (byte) 0x8c, (byte) 0x2d, (byte) 0xc2, (byte) 0xb9, (byte) 0x6f, (byte) 0xfd, (byte) 0xd8, (byte) 0x66, (byte) 0x94, (byte) 0x09, (byte) 0x23, (byte) 0x41, (byte) 0xbc, (byte) 0x04}; byte[] authData = { (byte) 0x54, (byte) 0x68, (byte) 0x65, (byte) 0x20, (byte) 0x73, (byte) 0x65, (byte) 0x63, (byte) 0x6f, (byte) 0x6e, (byte) 0x64, (byte) 0x20, (byte) 0x70, (byte) 0x72, (byte) 0x69, (byte) 0x6e, (byte) 0x63, (byte) 0x69, (byte) 0x70, (byte) 0x6c, (byte) 0x65, (byte) 0x20, (byte) 0x6f, (byte) 0x66, (byte) 0x20, (byte) 0x41, (byte) 0x75, (byte) 0x67, (byte) 0x75, (byte) 0x73, (byte) 0x74, (byte) 0x65, (byte) 0x20, (byte) 0x4b, (byte) 0x65, (byte) 0x72, (byte) 0x63, (byte) 0x6b, (byte) 0x68, (byte) 0x6f, (byte) 0x66, (byte) 0x66, (byte) 0x73 }; byte[] plainText = new byte[100]; new Random(0x1234567L).nextBytes(plainText); cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plainText) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(encryptResult -> System.out.printf("Received encrypted content of length %d with algorithm %s \n", encryptResult.getCipherText().length, encryptResult.getAlgorithm().toString())); cryptographyAsyncClient.encrypt(EncryptionAlgorithm.A192CBC_HS384, plainText, iv, authData) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(encryptResult -> System.out.printf("Received encrypted content of length %d with algorithm %s \n", encryptResult.getCipherText().length, encryptResult.getAlgorithm().toString())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient */ 
public void decrypt() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] iv = {(byte) 0x1a, (byte) 0xf3, (byte) 0x8c, (byte) 0x2d, (byte) 0xc2, (byte) 0xb9, (byte) 0x6f, (byte) 0xfd, (byte) 0xd8, (byte) 0x66, (byte) 0x94, (byte) 0x09, (byte) 0x23, (byte) 0x41, (byte) 0xbc, (byte) 0x04}; byte[] authData = { (byte) 0x54, (byte) 0x68, (byte) 0x65, (byte) 0x20, (byte) 0x73, (byte) 0x65, (byte) 0x63, (byte) 0x6f, (byte) 0x6e, (byte) 0x64, (byte) 0x20, (byte) 0x70, (byte) 0x72, (byte) 0x69, (byte) 0x6e, (byte) 0x63, (byte) 0x69, (byte) 0x70, (byte) 0x6c, (byte) 0x65, (byte) 0x20, (byte) 0x6f, (byte) 0x66, (byte) 0x20, (byte) 0x41, (byte) 0x75, (byte) 0x67, (byte) 0x75, (byte) 0x73, (byte) 0x74, (byte) 0x65, (byte) 0x20, (byte) 0x4b, (byte) 0x65, (byte) 0x72, (byte) 0x63, (byte) 0x6b, (byte) 0x68, (byte) 0x6f, (byte) 0x66, (byte) 0x66, (byte) 0x73 }; byte[] authTag = {(byte) 0x65, (byte) 0x2c, (byte) 0x3f, (byte) 0xa3, (byte) 0x6b, (byte) 0x0a, (byte) 0x7c, (byte) 0x5b, (byte) 0x32, (byte) 0x19, (byte) 0xfa, (byte) 0xb3, (byte) 0xa3, (byte) 0x0b, (byte) 0xc1, (byte) 0xc4}; byte[] plainText = new byte[100]; new Random(0x1234567L).nextBytes(plainText); cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, plainText) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(decryptResult -> System.out.printf("Received decrypted content of length %d\n", decryptResult.getPlainText().length)); cryptographyAsyncClient.decrypt(EncryptionAlgorithm.A192CBC_HS384, plainText, iv, authData, authTag) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(encryptResult -> System.out.printf("Received decrypted content of length %d with algorithm %s \n", encryptResult.getPlainText().length)); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient * * @throws NoSuchAlgorithmException when the specified algorithm doesn't exist. 
*/ public void signVerify() throws NoSuchAlgorithmException { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] signature = new byte[100]; byte[] data = new byte[100]; new Random(0x1234567L).nextBytes(data); MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(data); byte[] digest = md.digest(); cryptographyAsyncClient.sign(SignatureAlgorithm.ES256, digest) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(signResult -> System.out.printf("Received signature of length %d with algorithm %s", signResult.getSignature().length)); cryptographyAsyncClient.verify(SignatureAlgorithm.ES256, digest, signature) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(verifyResult -> System.out.printf("Verification status %s", verifyResult.isValid())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient * * @throws NoSuchAlgorithmException when the specified algorithm doesn't exist. 
*/ public void signDataVerifyData() throws NoSuchAlgorithmException { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] signature = new byte[100]; byte[] data = new byte[100]; new Random(0x1234567L).nextBytes(data); cryptographyAsyncClient.sign(SignatureAlgorithm.ES256, data) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(signResult -> System.out.printf("Received signature of length %d with algorithm %s", signResult.getSignature().length)); cryptographyAsyncClient.verify(SignatureAlgorithm.ES256, data, signature) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(verifyResult -> System.out.printf("Verification status %s", verifyResult.isValid())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient */ public void wrapKeyUnwrapKey() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] encryptedKey = new byte[100]; byte[] key = new byte[100]; new Random(0x1234567L).nextBytes(key); cryptographyAsyncClient.wrapKey(KeyWrapAlgorithm.RSA_OAEP, key) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(keyWrapResult -> System.out.printf("Received encypted key of length %d with algorithm %s", keyWrapResult.getEncryptedKey().length, keyWrapResult.getAlgorithm().toString())); cryptographyAsyncClient.unwrapKey(KeyWrapAlgorithm.RSA_OAEP, encryptedKey) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(keyUnwrapResult -> System.out.printf("Received key of length %d", keyUnwrapResult.getKey().length)); } /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
class CryptographyAsyncClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public CryptographyAsyncClient createAsyncClientWithHttpClient() { CryptographyAsyncClient cryptographyAsyncClient = new CryptographyClientBuilder() .keyIdentifier("<Your-Key-ID>") .credential(new DefaultAzureCredentialBuilder().build()) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .httpClient(HttpClient.createDefault()) .buildAsyncClient(); return cryptographyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public CryptographyAsyncClient createAsyncClient() { CryptographyAsyncClient cryptographyAsyncClient = new CryptographyClientBuilder() .keyIdentifier("<YOUR-KEY-ID>") .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); return cryptographyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ /** * Generates a code sample for using {@link CryptographyAsyncClient */ public void getKeyWithResponseSnippets() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); cryptographyAsyncClient.getKeyWithResponse() .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is returned with name %s and id %s \n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link CryptographyAsyncClient */ public void getKeySnippets() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); cryptographyAsyncClient.getKey() .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) 
.subscribe(key -> System.out.printf("Key is returned with name %s and id %s \n", key.getName(), key.getId())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient */ public void encrypt() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] iv = {(byte) 0x1a, (byte) 0xf3, (byte) 0x8c, (byte) 0x2d, (byte) 0xc2, (byte) 0xb9, (byte) 0x6f, (byte) 0xfd, (byte) 0xd8, (byte) 0x66, (byte) 0x94, (byte) 0x09, (byte) 0x23, (byte) 0x41, (byte) 0xbc, (byte) 0x04}; byte[] authData = { (byte) 0x54, (byte) 0x68, (byte) 0x65, (byte) 0x20, (byte) 0x73, (byte) 0x65, (byte) 0x63, (byte) 0x6f, (byte) 0x6e, (byte) 0x64, (byte) 0x20, (byte) 0x70, (byte) 0x72, (byte) 0x69, (byte) 0x6e, (byte) 0x63, (byte) 0x69, (byte) 0x70, (byte) 0x6c, (byte) 0x65, (byte) 0x20, (byte) 0x6f, (byte) 0x66, (byte) 0x20, (byte) 0x41, (byte) 0x75, (byte) 0x67, (byte) 0x75, (byte) 0x73, (byte) 0x74, (byte) 0x65, (byte) 0x20, (byte) 0x4b, (byte) 0x65, (byte) 0x72, (byte) 0x63, (byte) 0x6b, (byte) 0x68, (byte) 0x6f, (byte) 0x66, (byte) 0x66, (byte) 0x73 }; byte[] plainText = new byte[100]; new Random(0x1234567L).nextBytes(plainText); cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plainText) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(encryptResult -> System.out.printf("Received encrypted content of length %d with algorithm %s \n", encryptResult.getCipherText().length, encryptResult.getAlgorithm().toString())); cryptographyAsyncClient.encrypt(EncryptionAlgorithm.A192CBC_HS384, plainText, iv, authData) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(encryptResult -> System.out.printf("Received encrypted content of length %d with algorithm %s \n", encryptResult.getCipherText().length, encryptResult.getAlgorithm().toString())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient */ 
public void decrypt() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] iv = {(byte) 0x1a, (byte) 0xf3, (byte) 0x8c, (byte) 0x2d, (byte) 0xc2, (byte) 0xb9, (byte) 0x6f, (byte) 0xfd, (byte) 0xd8, (byte) 0x66, (byte) 0x94, (byte) 0x09, (byte) 0x23, (byte) 0x41, (byte) 0xbc, (byte) 0x04}; byte[] authData = { (byte) 0x54, (byte) 0x68, (byte) 0x65, (byte) 0x20, (byte) 0x73, (byte) 0x65, (byte) 0x63, (byte) 0x6f, (byte) 0x6e, (byte) 0x64, (byte) 0x20, (byte) 0x70, (byte) 0x72, (byte) 0x69, (byte) 0x6e, (byte) 0x63, (byte) 0x69, (byte) 0x70, (byte) 0x6c, (byte) 0x65, (byte) 0x20, (byte) 0x6f, (byte) 0x66, (byte) 0x20, (byte) 0x41, (byte) 0x75, (byte) 0x67, (byte) 0x75, (byte) 0x73, (byte) 0x74, (byte) 0x65, (byte) 0x20, (byte) 0x4b, (byte) 0x65, (byte) 0x72, (byte) 0x63, (byte) 0x6b, (byte) 0x68, (byte) 0x6f, (byte) 0x66, (byte) 0x66, (byte) 0x73 }; byte[] authTag = {(byte) 0x65, (byte) 0x2c, (byte) 0x3f, (byte) 0xa3, (byte) 0x6b, (byte) 0x0a, (byte) 0x7c, (byte) 0x5b, (byte) 0x32, (byte) 0x19, (byte) 0xfa, (byte) 0xb3, (byte) 0xa3, (byte) 0x0b, (byte) 0xc1, (byte) 0xc4}; byte[] plainText = new byte[100]; new Random(0x1234567L).nextBytes(plainText); cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, plainText) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(decryptResult -> System.out.printf("Received decrypted content of length %d\n", decryptResult.getPlainText().length)); cryptographyAsyncClient.decrypt(EncryptionAlgorithm.A192CBC_HS384, plainText, iv, authData, authTag) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(encryptResult -> System.out.printf("Received decrypted content of length %d with algorithm %s \n", encryptResult.getPlainText().length)); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient * * @throws NoSuchAlgorithmException when the specified algorithm doesn't exist. 
*/ public void signVerify() throws NoSuchAlgorithmException { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] signature = new byte[100]; byte[] data = new byte[100]; new Random(0x1234567L).nextBytes(data); MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(data); byte[] digest = md.digest(); cryptographyAsyncClient.sign(SignatureAlgorithm.ES256, digest) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(signResult -> System.out.printf("Received signature of length %d with algorithm %s", signResult.getSignature().length)); cryptographyAsyncClient.verify(SignatureAlgorithm.ES256, digest, signature) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(verifyResult -> System.out.printf("Verification status %s", verifyResult.isValid())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient * * @throws NoSuchAlgorithmException when the specified algorithm doesn't exist. 
*/ public void signDataVerifyData() throws NoSuchAlgorithmException { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] signature = new byte[100]; byte[] data = new byte[100]; new Random(0x1234567L).nextBytes(data); cryptographyAsyncClient.sign(SignatureAlgorithm.ES256, data) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(signResult -> System.out.printf("Received signature of length %d with algorithm %s", signResult.getSignature().length)); cryptographyAsyncClient.verify(SignatureAlgorithm.ES256, data, signature) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(verifyResult -> System.out.printf("Verification status %s", verifyResult.isValid())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient */ public void wrapKeyUnwrapKey() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] encryptedKey = new byte[100]; byte[] key = new byte[100]; new Random(0x1234567L).nextBytes(key); cryptographyAsyncClient.wrapKey(KeyWrapAlgorithm.RSA_OAEP, key) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(keyWrapResult -> System.out.printf("Received encypted key of length %d with algorithm %s", keyWrapResult.getEncryptedKey().length, keyWrapResult.getAlgorithm().toString())); cryptographyAsyncClient.unwrapKey(KeyWrapAlgorithm.RSA_OAEP, encryptedKey) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(keyUnwrapResult -> System.out.printf("Received key of length %d", keyUnwrapResult.getKey().length)); } /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
Discussed offline with @g2vinay . He has updates to the code snippets in his PR - #5701
public CryptographyAsyncClient createAsyncClientWithPipeline() { HttpPipeline pipeline = new HttpPipelineBuilder().build(); CryptographyAsyncClient cryptographyAsyncClient = new CryptographyClientBuilder() .pipeline(pipeline) .keyIdentifier("<YOUR-KEY-ID") .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); return cryptographyAsyncClient; }
HttpPipeline pipeline = new HttpPipelineBuilder().build();
public CryptographyAsyncClient createAsyncClientWithPipeline() { HttpPipeline pipeline = new HttpPipelineBuilder().build(); CryptographyAsyncClient cryptographyAsyncClient = new CryptographyClientBuilder() .pipeline(pipeline) .keyIdentifier("<YOUR-KEY-ID") .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); return cryptographyAsyncClient; }
class CryptographyAsyncClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public CryptographyAsyncClient createAsyncClientWithHttpClient() { CryptographyAsyncClient cryptographyAsyncClient = new CryptographyClientBuilder() .keyIdentifier("<Your-Key-ID>") .credential(new DefaultAzureCredentialBuilder().build()) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .httpClient(HttpClient.createDefault()) .buildAsyncClient(); return cryptographyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public CryptographyAsyncClient createAsyncClient() { CryptographyAsyncClient cryptographyAsyncClient = new CryptographyClientBuilder() .keyIdentifier("<YOUR-KEY-ID>") .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); return cryptographyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ /** * Generates a code sample for using {@link CryptographyAsyncClient */ public void getKeyWithResponseSnippets() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); cryptographyAsyncClient.getKeyWithResponse() .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is returned with name %s and id %s \n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link CryptographyAsyncClient */ public void getKeySnippets() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); cryptographyAsyncClient.getKey() .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) 
.subscribe(key -> System.out.printf("Key is returned with name %s and id %s \n", key.getName(), key.getId())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient */ public void encrypt() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] iv = {(byte) 0x1a, (byte) 0xf3, (byte) 0x8c, (byte) 0x2d, (byte) 0xc2, (byte) 0xb9, (byte) 0x6f, (byte) 0xfd, (byte) 0xd8, (byte) 0x66, (byte) 0x94, (byte) 0x09, (byte) 0x23, (byte) 0x41, (byte) 0xbc, (byte) 0x04}; byte[] authData = { (byte) 0x54, (byte) 0x68, (byte) 0x65, (byte) 0x20, (byte) 0x73, (byte) 0x65, (byte) 0x63, (byte) 0x6f, (byte) 0x6e, (byte) 0x64, (byte) 0x20, (byte) 0x70, (byte) 0x72, (byte) 0x69, (byte) 0x6e, (byte) 0x63, (byte) 0x69, (byte) 0x70, (byte) 0x6c, (byte) 0x65, (byte) 0x20, (byte) 0x6f, (byte) 0x66, (byte) 0x20, (byte) 0x41, (byte) 0x75, (byte) 0x67, (byte) 0x75, (byte) 0x73, (byte) 0x74, (byte) 0x65, (byte) 0x20, (byte) 0x4b, (byte) 0x65, (byte) 0x72, (byte) 0x63, (byte) 0x6b, (byte) 0x68, (byte) 0x6f, (byte) 0x66, (byte) 0x66, (byte) 0x73 }; byte[] plainText = new byte[100]; new Random(0x1234567L).nextBytes(plainText); cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plainText) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(encryptResult -> System.out.printf("Received encrypted content of length %d with algorithm %s \n", encryptResult.getCipherText().length, encryptResult.getAlgorithm().toString())); cryptographyAsyncClient.encrypt(EncryptionAlgorithm.A192CBC_HS384, plainText, iv, authData) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(encryptResult -> System.out.printf("Received encrypted content of length %d with algorithm %s \n", encryptResult.getCipherText().length, encryptResult.getAlgorithm().toString())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient */ 
public void decrypt() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] iv = {(byte) 0x1a, (byte) 0xf3, (byte) 0x8c, (byte) 0x2d, (byte) 0xc2, (byte) 0xb9, (byte) 0x6f, (byte) 0xfd, (byte) 0xd8, (byte) 0x66, (byte) 0x94, (byte) 0x09, (byte) 0x23, (byte) 0x41, (byte) 0xbc, (byte) 0x04}; byte[] authData = { (byte) 0x54, (byte) 0x68, (byte) 0x65, (byte) 0x20, (byte) 0x73, (byte) 0x65, (byte) 0x63, (byte) 0x6f, (byte) 0x6e, (byte) 0x64, (byte) 0x20, (byte) 0x70, (byte) 0x72, (byte) 0x69, (byte) 0x6e, (byte) 0x63, (byte) 0x69, (byte) 0x70, (byte) 0x6c, (byte) 0x65, (byte) 0x20, (byte) 0x6f, (byte) 0x66, (byte) 0x20, (byte) 0x41, (byte) 0x75, (byte) 0x67, (byte) 0x75, (byte) 0x73, (byte) 0x74, (byte) 0x65, (byte) 0x20, (byte) 0x4b, (byte) 0x65, (byte) 0x72, (byte) 0x63, (byte) 0x6b, (byte) 0x68, (byte) 0x6f, (byte) 0x66, (byte) 0x66, (byte) 0x73 }; byte[] authTag = {(byte) 0x65, (byte) 0x2c, (byte) 0x3f, (byte) 0xa3, (byte) 0x6b, (byte) 0x0a, (byte) 0x7c, (byte) 0x5b, (byte) 0x32, (byte) 0x19, (byte) 0xfa, (byte) 0xb3, (byte) 0xa3, (byte) 0x0b, (byte) 0xc1, (byte) 0xc4}; byte[] plainText = new byte[100]; new Random(0x1234567L).nextBytes(plainText); cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, plainText) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(decryptResult -> System.out.printf("Received decrypted content of length %d\n", decryptResult.getPlainText().length)); cryptographyAsyncClient.decrypt(EncryptionAlgorithm.A192CBC_HS384, plainText, iv, authData, authTag) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(encryptResult -> System.out.printf("Received decrypted content of length %d with algorithm %s \n", encryptResult.getPlainText().length)); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient * * @throws NoSuchAlgorithmException when the specified algorithm doesn't exist. 
*/ public void signVerify() throws NoSuchAlgorithmException { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] signature = new byte[100]; byte[] data = new byte[100]; new Random(0x1234567L).nextBytes(data); MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(data); byte[] digest = md.digest(); cryptographyAsyncClient.sign(SignatureAlgorithm.ES256, digest) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(signResult -> System.out.printf("Received signature of length %d with algorithm %s", signResult.getSignature().length)); cryptographyAsyncClient.verify(SignatureAlgorithm.ES256, digest, signature) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(verifyResult -> System.out.printf("Verification status %s", verifyResult.isValid())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient * * @throws NoSuchAlgorithmException when the specified algorithm doesn't exist. 
*/ public void signDataVerifyData() throws NoSuchAlgorithmException { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] signature = new byte[100]; byte[] data = new byte[100]; new Random(0x1234567L).nextBytes(data); cryptographyAsyncClient.sign(SignatureAlgorithm.ES256, data) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(signResult -> System.out.printf("Received signature of length %d with algorithm %s", signResult.getSignature().length)); cryptographyAsyncClient.verify(SignatureAlgorithm.ES256, data, signature) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(verifyResult -> System.out.printf("Verification status %s", verifyResult.isValid())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient */ public void wrapKeyUnwrapKey() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] encryptedKey = new byte[100]; byte[] key = new byte[100]; new Random(0x1234567L).nextBytes(key); cryptographyAsyncClient.wrapKey(KeyWrapAlgorithm.RSA_OAEP, key) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(keyWrapResult -> System.out.printf("Received encypted key of length %d with algorithm %s", keyWrapResult.getEncryptedKey().length, keyWrapResult.getAlgorithm().toString())); cryptographyAsyncClient.unwrapKey(KeyWrapAlgorithm.RSA_OAEP, encryptedKey) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(keyUnwrapResult -> System.out.printf("Received key of length %d", keyUnwrapResult.getKey().length)); } /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
class CryptographyAsyncClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public CryptographyAsyncClient createAsyncClientWithHttpClient() { CryptographyAsyncClient cryptographyAsyncClient = new CryptographyClientBuilder() .keyIdentifier("<Your-Key-ID>") .credential(new DefaultAzureCredentialBuilder().build()) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .httpClient(HttpClient.createDefault()) .buildAsyncClient(); return cryptographyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public CryptographyAsyncClient createAsyncClient() { CryptographyAsyncClient cryptographyAsyncClient = new CryptographyClientBuilder() .keyIdentifier("<YOUR-KEY-ID>") .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); return cryptographyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ /** * Generates a code sample for using {@link CryptographyAsyncClient */ public void getKeyWithResponseSnippets() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); cryptographyAsyncClient.getKeyWithResponse() .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is returned with name %s and id %s \n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link CryptographyAsyncClient */ public void getKeySnippets() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); cryptographyAsyncClient.getKey() .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) 
.subscribe(key -> System.out.printf("Key is returned with name %s and id %s \n", key.getName(), key.getId())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient */ public void encrypt() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] iv = {(byte) 0x1a, (byte) 0xf3, (byte) 0x8c, (byte) 0x2d, (byte) 0xc2, (byte) 0xb9, (byte) 0x6f, (byte) 0xfd, (byte) 0xd8, (byte) 0x66, (byte) 0x94, (byte) 0x09, (byte) 0x23, (byte) 0x41, (byte) 0xbc, (byte) 0x04}; byte[] authData = { (byte) 0x54, (byte) 0x68, (byte) 0x65, (byte) 0x20, (byte) 0x73, (byte) 0x65, (byte) 0x63, (byte) 0x6f, (byte) 0x6e, (byte) 0x64, (byte) 0x20, (byte) 0x70, (byte) 0x72, (byte) 0x69, (byte) 0x6e, (byte) 0x63, (byte) 0x69, (byte) 0x70, (byte) 0x6c, (byte) 0x65, (byte) 0x20, (byte) 0x6f, (byte) 0x66, (byte) 0x20, (byte) 0x41, (byte) 0x75, (byte) 0x67, (byte) 0x75, (byte) 0x73, (byte) 0x74, (byte) 0x65, (byte) 0x20, (byte) 0x4b, (byte) 0x65, (byte) 0x72, (byte) 0x63, (byte) 0x6b, (byte) 0x68, (byte) 0x6f, (byte) 0x66, (byte) 0x66, (byte) 0x73 }; byte[] plainText = new byte[100]; new Random(0x1234567L).nextBytes(plainText); cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plainText) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(encryptResult -> System.out.printf("Received encrypted content of length %d with algorithm %s \n", encryptResult.getCipherText().length, encryptResult.getAlgorithm().toString())); cryptographyAsyncClient.encrypt(EncryptionAlgorithm.A192CBC_HS384, plainText, iv, authData) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(encryptResult -> System.out.printf("Received encrypted content of length %d with algorithm %s \n", encryptResult.getCipherText().length, encryptResult.getAlgorithm().toString())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient */ 
public void decrypt() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] iv = {(byte) 0x1a, (byte) 0xf3, (byte) 0x8c, (byte) 0x2d, (byte) 0xc2, (byte) 0xb9, (byte) 0x6f, (byte) 0xfd, (byte) 0xd8, (byte) 0x66, (byte) 0x94, (byte) 0x09, (byte) 0x23, (byte) 0x41, (byte) 0xbc, (byte) 0x04}; byte[] authData = { (byte) 0x54, (byte) 0x68, (byte) 0x65, (byte) 0x20, (byte) 0x73, (byte) 0x65, (byte) 0x63, (byte) 0x6f, (byte) 0x6e, (byte) 0x64, (byte) 0x20, (byte) 0x70, (byte) 0x72, (byte) 0x69, (byte) 0x6e, (byte) 0x63, (byte) 0x69, (byte) 0x70, (byte) 0x6c, (byte) 0x65, (byte) 0x20, (byte) 0x6f, (byte) 0x66, (byte) 0x20, (byte) 0x41, (byte) 0x75, (byte) 0x67, (byte) 0x75, (byte) 0x73, (byte) 0x74, (byte) 0x65, (byte) 0x20, (byte) 0x4b, (byte) 0x65, (byte) 0x72, (byte) 0x63, (byte) 0x6b, (byte) 0x68, (byte) 0x6f, (byte) 0x66, (byte) 0x66, (byte) 0x73 }; byte[] authTag = {(byte) 0x65, (byte) 0x2c, (byte) 0x3f, (byte) 0xa3, (byte) 0x6b, (byte) 0x0a, (byte) 0x7c, (byte) 0x5b, (byte) 0x32, (byte) 0x19, (byte) 0xfa, (byte) 0xb3, (byte) 0xa3, (byte) 0x0b, (byte) 0xc1, (byte) 0xc4}; byte[] plainText = new byte[100]; new Random(0x1234567L).nextBytes(plainText); cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, plainText) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(decryptResult -> System.out.printf("Received decrypted content of length %d\n", decryptResult.getPlainText().length)); cryptographyAsyncClient.decrypt(EncryptionAlgorithm.A192CBC_HS384, plainText, iv, authData, authTag) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(encryptResult -> System.out.printf("Received decrypted content of length %d with algorithm %s \n", encryptResult.getPlainText().length)); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient * * @throws NoSuchAlgorithmException when the specified algorithm doesn't exist. 
*/ public void signVerify() throws NoSuchAlgorithmException { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] signature = new byte[100]; byte[] data = new byte[100]; new Random(0x1234567L).nextBytes(data); MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(data); byte[] digest = md.digest(); cryptographyAsyncClient.sign(SignatureAlgorithm.ES256, digest) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(signResult -> System.out.printf("Received signature of length %d with algorithm %s", signResult.getSignature().length)); cryptographyAsyncClient.verify(SignatureAlgorithm.ES256, digest, signature) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(verifyResult -> System.out.printf("Verification status %s", verifyResult.isValid())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient * * @throws NoSuchAlgorithmException when the specified algorithm doesn't exist. 
*/ public void signDataVerifyData() throws NoSuchAlgorithmException { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] signature = new byte[100]; byte[] data = new byte[100]; new Random(0x1234567L).nextBytes(data); cryptographyAsyncClient.sign(SignatureAlgorithm.ES256, data) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(signResult -> System.out.printf("Received signature of length %d with algorithm %s", signResult.getSignature().length)); cryptographyAsyncClient.verify(SignatureAlgorithm.ES256, data, signature) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(verifyResult -> System.out.printf("Verification status %s", verifyResult.isValid())); } /** * Generates a code sample for using {@link CryptographyAsyncClient * {@link CryptographyAsyncClient */ public void wrapKeyUnwrapKey() { CryptographyAsyncClient cryptographyAsyncClient = createAsyncClient(); byte[] encryptedKey = new byte[100]; byte[] key = new byte[100]; new Random(0x1234567L).nextBytes(key); cryptographyAsyncClient.wrapKey(KeyWrapAlgorithm.RSA_OAEP, key) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(keyWrapResult -> System.out.printf("Received encypted key of length %d with algorithm %s", keyWrapResult.getEncryptedKey().length, keyWrapResult.getAlgorithm().toString())); cryptographyAsyncClient.unwrapKey(KeyWrapAlgorithm.RSA_OAEP, encryptedKey) .subscriberContext(reactor.util.context.Context.of(key1, value1, key2, value2)) .subscribe(keyUnwrapResult -> System.out.printf("Received key of length %d", keyUnwrapResult.getKey().length)); } /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
nit: anyway to avoid the same exact code being written 3x in this file? otherwise lgtm
public void streamByPageSnippet() { CustomPagedFlux<String> customPagedFlux = createCustomInstance(); PagedIterableBase<String, PagedResponse<String>> customPagedIterableResponse = new PagedIterableBase<>(customPagedFlux); customPagedIterableResponse.streamByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.getHeaders(), resp.getRequest().getUrl(), resp.getStatusCode()); resp.getItems().forEach(value -> { System.out.printf("Response value is %s %n", value); }); }); }
System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.getHeaders(),
public void streamByPageSnippet() { CustomPagedFlux<String> customPagedFlux = createCustomInstance(); PagedIterableBase<String, PagedResponse<String>> customPagedIterableResponse = new PagedIterableBase<>(customPagedFlux); customPagedIterableResponse.streamByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.getHeaders(), resp.getRequest().getUrl(), resp.getStatusCode()); resp.getItems().forEach(value -> { System.out.printf("Response value is %s %n", value); }); }); }
class CustomPagedFlux<String> extends PagedFluxBase<String, PagedResponse<String>> { CustomPagedFlux(Supplier<Mono<PagedResponse<String>>> firstPageRetriever) { super(firstPageRetriever); } CustomPagedFlux(Supplier<Mono<PagedResponse<String>>> firstPageRetriever, Function<java.lang.String, Mono<PagedResponse<String>>> nextPageRetriever) { super(firstPageRetriever, nextPageRetriever); } }
class CustomPagedFlux<String> extends PagedFluxBase<String, PagedResponse<String>> { CustomPagedFlux(Supplier<Mono<PagedResponse<String>>> firstPageRetriever) { super(firstPageRetriever); } CustomPagedFlux(Supplier<Mono<PagedResponse<String>>> firstPageRetriever, Function<java.lang.String, Mono<PagedResponse<String>>> nextPageRetriever) { super(firstPageRetriever, nextPageRetriever); } }
These are code snippets that get inserted into our Javadocs. They all have unique codesnippet tags and get inserted for different methods. So, in this case, when viewed on javadocs will provide a clear self-contained sample at the cost of a bit of duplication.
public void streamByPageSnippet() { CustomPagedFlux<String> customPagedFlux = createCustomInstance(); PagedIterableBase<String, PagedResponse<String>> customPagedIterableResponse = new PagedIterableBase<>(customPagedFlux); customPagedIterableResponse.streamByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.getHeaders(), resp.getRequest().getUrl(), resp.getStatusCode()); resp.getItems().forEach(value -> { System.out.printf("Response value is %s %n", value); }); }); }
System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.getHeaders(),
public void streamByPageSnippet() { CustomPagedFlux<String> customPagedFlux = createCustomInstance(); PagedIterableBase<String, PagedResponse<String>> customPagedIterableResponse = new PagedIterableBase<>(customPagedFlux); customPagedIterableResponse.streamByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.getHeaders(), resp.getRequest().getUrl(), resp.getStatusCode()); resp.getItems().forEach(value -> { System.out.printf("Response value is %s %n", value); }); }); }
class CustomPagedFlux<String> extends PagedFluxBase<String, PagedResponse<String>> { CustomPagedFlux(Supplier<Mono<PagedResponse<String>>> firstPageRetriever) { super(firstPageRetriever); } CustomPagedFlux(Supplier<Mono<PagedResponse<String>>> firstPageRetriever, Function<java.lang.String, Mono<PagedResponse<String>>> nextPageRetriever) { super(firstPageRetriever, nextPageRetriever); } }
class CustomPagedFlux<String> extends PagedFluxBase<String, PagedResponse<String>> { CustomPagedFlux(Supplier<Mono<PagedResponse<String>>> firstPageRetriever) { super(firstPageRetriever); } CustomPagedFlux(Supplier<Mono<PagedResponse<String>>> firstPageRetriever, Function<java.lang.String, Mono<PagedResponse<String>>> nextPageRetriever) { super(firstPageRetriever, nextPageRetriever); } }
Seems like this could also be hit if there are no accounts no? Perhaps this is confusing if the user didn't specify username or AZURE_USERNAME
/**
 * Gets a token for the requested scopes from the shared token cache.
 *
 * @param request the details of the token request, including the scopes to acquire
 * @return a Mono that emits the cached account's access token, or an error when no
 *     matching account (or more than one) is found in the shared token cache
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    // Lazily build the MSAL public client backed by the persistent shared token cache.
    if (pubClient == null) {
        try {
            PersistentTokenCacheAccessAspect accessAspect = new PersistentTokenCacheAccessAspect();
            pubClient = PublicClientApplication.builder(this.clientID)
                .setTokenCacheAccessAspect(accessAspect)
                .build();
        } catch (Exception e) {
            return Mono.error(e);
        }
    }
    return Mono.fromFuture(pubClient.getAccounts())
        .flatMap(set -> {
            // De-duplicate cached accounts by home account id, keeping only the ones that
            // match the configured username (or every account when no username was set).
            Map<String, IAccount> accounts = new HashMap<>();
            for (IAccount cached : set) {
                if (username == null || username.equals(cached.username())) {
                    accounts.putIfAbsent(cached.homeAccountId(), cached);
                }
            }
            if (accounts.isEmpty()) {
                if (username == null) {
                    // The cache itself is empty; no username filtering was applied.
                    return Mono.error(new RuntimeException("No accounts were discovered in the shared token cache."
                        + " To fix, authenticate through tooling supporting azure developer sign on."));
                }
                // List the usernames actually present in the (unfiltered) cache so a typo in
                // the requested username is easy to spot; the filtered map is empty here.
                StringBuilder discovered = new StringBuilder();
                for (IAccount cached : set) {
                    if (discovered.length() > 0) {
                        discovered.append(", ");
                    }
                    discovered.append(cached.username());
                }
                return Mono.error(new RuntimeException(String.format("User account '%s' was not found in the "
                    + "shared token cache. Discovered Accounts: [ '%s' ]", username, discovered)));
            } else if (accounts.size() > 1) {
                if (username == null) {
                    return Mono.error(new RuntimeException("Multiple accounts were discovered in the shared token "
                        + "cache. To fix, set the AZURE_USERNAME environment variable to the preferred username, "
                        + "or specify it when constructing SharedTokenCacheCredential."));
                }
                return Mono.error(new RuntimeException("Multiple entries for the user account " + username
                    + " were found in the shared token cache. This is not currently supported by the"
                    + " SharedTokenCacheCredential."));
            }
            IAccount requestedAccount = accounts.values().iterator().next();
            SilentParameters params = SilentParameters.builder(
                new HashSet<>(request.getScopes()), requestedAccount).build();
            try {
                CompletableFuture<IAuthenticationResult> future = pubClient.acquireTokenSilently(params);
                return Mono.fromFuture(() -> future).map(result -> new AccessToken(result.accessToken(),
                    result.expiresOnDate().toInstant().atOffset(ZoneOffset.UTC)));
            } catch (MalformedURLException e) {
                // Preserve the cause instead of printing the stack trace and discarding it.
                return Mono.error(new RuntimeException("Token was not found", e));
            }
        });
}
return Mono.error(new RuntimeException("Requested account was not found"));
public Mono<AccessToken> getToken(TokenRequestContext request) { if (pubClient == null) { try { PersistentTokenCacheAccessAspect accessAspect = new PersistentTokenCacheAccessAspect(); pubClient = PublicClientApplication.builder(this.clientID) .setTokenCacheAccessAspect(accessAspect) .build(); } catch (Exception e) { return Mono.error(e); } } return Mono.fromFuture(pubClient.getAccounts()) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.size() == 0) { if (username == null) { return Mono.error(new RuntimeException("No accounts were discovered in the shared token cache." + " To fix, authenticate through tooling supporting azure developer sign on.")); } else { return Mono.error(new RuntimeException(String.format("User account '%s' was not found in the " + "shared token cache. Discovered Accounts: [ '%s' ]", username,accounts.values().stream() .map(IAccount::username).collect(Collectors.joining(", "))))); } } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("Multiple accounts were discovered in the shared token " + "cache. To fix, set the AZURE_USERNAME environment variable to the preferred username, " + "or specify it when constructing SharedTokenCacheCredential.")); } else { return Mono.error(new RuntimeException("Multiple entries for the user account " + username + " were found in the shared token cache. 
This is not currently supported by the" + " SharedTokenCacheCredential.")); } } else { requestedAccount = accounts.values().iterator().next(); } SilentParameters params = SilentParameters.builder( new HashSet<>(request.getScopes()), requestedAccount).build(); CompletableFuture<IAuthenticationResult> future; try { future = pubClient.acquireTokenSilently(params); return Mono.fromFuture(() -> future).map(result -> new AccessToken(result.accessToken(), result.expiresOnDate().toInstant().atOffset(ZoneOffset.UTC))); } catch (MalformedURLException e) { e.printStackTrace(); return Mono.error(new RuntimeException("Token was not found")); } }); }
class SharedTokenCacheCredential implements TokenCredential { private final String username; private final String clientID; private final Configuration configuration; private PublicClientApplication pubClient; /** * Creates an instance of the Shared Token Cache Credential Provider. * * @param username the username of the account for the application * @param clientID the client ID of the application * @param identityClientOptions the options for configuring the identity client */ SharedTokenCacheCredential(String username, String clientID, IdentityClientOptions identityClientOptions) { this.configuration = Configuration.getGlobalConfiguration().clone(); if (username == null) { this.username = configuration.get(Configuration.PROPERTY_AZURE_USERNAME); } else { this.username = username; } this.clientID = clientID; } /** * Gets token from shared token cache * */ @Override }
class SharedTokenCacheCredential implements TokenCredential { private final String username; private final String clientID; private final Configuration configuration; private PublicClientApplication pubClient; /** * Creates an instance of the Shared Token Cache Credential Provider. * * @param username the username of the account for the application * @param clientID the client ID of the application * @param identityClientOptions the options for configuring the identity client */ SharedTokenCacheCredential(String username, String clientID, IdentityClientOptions identityClientOptions) { this.configuration = Configuration.getGlobalConfiguration().clone(); if (username == null) { this.username = configuration.get(Configuration.PROPERTY_AZURE_USERNAME); } else { this.username = username; } this.clientID = clientID; } /** * Gets token from shared token cache * */ @Override }
Same kind of issue here: if the username wasn't specified, this message wouldn't read correctly.
/**
 * Gets a token for the requested scopes from the shared token cache.
 *
 * @param request the details of the token request, including the scopes to acquire
 * @return a Mono that emits the cached account's access token, or an error when no
 *     matching account (or more than one) is found in the shared token cache
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    // Lazily build the MSAL public client backed by the persistent shared token cache.
    if (pubClient == null) {
        try {
            PersistentTokenCacheAccessAspect accessAspect = new PersistentTokenCacheAccessAspect();
            pubClient = PublicClientApplication.builder(this.clientID)
                .setTokenCacheAccessAspect(accessAspect)
                .build();
        } catch (Exception e) {
            return Mono.error(e);
        }
    }
    return Mono.fromFuture(pubClient.getAccounts())
        .flatMap(set -> {
            // De-duplicate cached accounts by home account id, keeping only the ones that
            // match the configured username (or every account when no username was set).
            Map<String, IAccount> accounts = new HashMap<>();
            for (IAccount cached : set) {
                if (username == null || username.equals(cached.username())) {
                    accounts.putIfAbsent(cached.homeAccountId(), cached);
                }
            }
            if (accounts.isEmpty()) {
                if (username == null) {
                    // The cache itself is empty; no username filtering was applied.
                    return Mono.error(new RuntimeException("No accounts were discovered in the shared token cache."
                        + " To fix, authenticate through tooling supporting azure developer sign on."));
                }
                // List the usernames actually present in the (unfiltered) cache so a typo in
                // the requested username is easy to spot; the filtered map is empty here.
                StringBuilder discovered = new StringBuilder();
                for (IAccount cached : set) {
                    if (discovered.length() > 0) {
                        discovered.append(", ");
                    }
                    discovered.append(cached.username());
                }
                return Mono.error(new RuntimeException(String.format("User account '%s' was not found in the "
                    + "shared token cache. Discovered Accounts: [ '%s' ]", username, discovered)));
            } else if (accounts.size() > 1) {
                if (username == null) {
                    return Mono.error(new RuntimeException("Multiple accounts were discovered in the shared token "
                        + "cache. To fix, set the AZURE_USERNAME environment variable to the preferred username, "
                        + "or specify it when constructing SharedTokenCacheCredential."));
                }
                return Mono.error(new RuntimeException("Multiple entries for the user account " + username
                    + " were found in the shared token cache. This is not currently supported by the"
                    + " SharedTokenCacheCredential."));
            }
            IAccount requestedAccount = accounts.values().iterator().next();
            SilentParameters params = SilentParameters.builder(
                new HashSet<>(request.getScopes()), requestedAccount).build();
            try {
                CompletableFuture<IAuthenticationResult> future = pubClient.acquireTokenSilently(params);
                return Mono.fromFuture(() -> future).map(result -> new AccessToken(result.accessToken(),
                    result.expiresOnDate().toInstant().atOffset(ZoneOffset.UTC)));
            } catch (MalformedURLException e) {
                // Preserve the cause instead of printing the stack trace and discarding it.
                return Mono.error(new RuntimeException("Token was not found", e));
            }
        });
}
+ " SharedTokenCacheCredential."));
public Mono<AccessToken> getToken(TokenRequestContext request) { if (pubClient == null) { try { PersistentTokenCacheAccessAspect accessAspect = new PersistentTokenCacheAccessAspect(); pubClient = PublicClientApplication.builder(this.clientID) .setTokenCacheAccessAspect(accessAspect) .build(); } catch (Exception e) { return Mono.error(e); } } return Mono.fromFuture(pubClient.getAccounts()) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.size() == 0) { if (username == null) { return Mono.error(new RuntimeException("No accounts were discovered in the shared token cache." + " To fix, authenticate through tooling supporting azure developer sign on.")); } else { return Mono.error(new RuntimeException(String.format("User account '%s' was not found in the " + "shared token cache. Discovered Accounts: [ '%s' ]", username,accounts.values().stream() .map(IAccount::username).collect(Collectors.joining(", "))))); } } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("Multiple accounts were discovered in the shared token " + "cache. To fix, set the AZURE_USERNAME environment variable to the preferred username, " + "or specify it when constructing SharedTokenCacheCredential.")); } else { return Mono.error(new RuntimeException("Multiple entries for the user account " + username + " were found in the shared token cache. 
This is not currently supported by the" + " SharedTokenCacheCredential.")); } } else { requestedAccount = accounts.values().iterator().next(); } SilentParameters params = SilentParameters.builder( new HashSet<>(request.getScopes()), requestedAccount).build(); CompletableFuture<IAuthenticationResult> future; try { future = pubClient.acquireTokenSilently(params); return Mono.fromFuture(() -> future).map(result -> new AccessToken(result.accessToken(), result.expiresOnDate().toInstant().atOffset(ZoneOffset.UTC))); } catch (MalformedURLException e) { e.printStackTrace(); return Mono.error(new RuntimeException("Token was not found")); } }); }
class SharedTokenCacheCredential implements TokenCredential { private final String username; private final String clientID; private final Configuration configuration; private PublicClientApplication pubClient; /** * Creates an instance of the Shared Token Cache Credential Provider. * * @param username the username of the account for the application * @param clientID the client ID of the application * @param identityClientOptions the options for configuring the identity client */ SharedTokenCacheCredential(String username, String clientID, IdentityClientOptions identityClientOptions) { this.configuration = Configuration.getGlobalConfiguration().clone(); if (username == null) { this.username = configuration.get(Configuration.PROPERTY_AZURE_USERNAME); } else { this.username = username; } this.clientID = clientID; } /** * Gets token from shared token cache * */ @Override }
class SharedTokenCacheCredential implements TokenCredential { private final String username; private final String clientID; private final Configuration configuration; private PublicClientApplication pubClient; /** * Creates an instance of the Shared Token Cache Credential Provider. * * @param username the username of the account for the application * @param clientID the client ID of the application * @param identityClientOptions the options for configuring the identity client */ SharedTokenCacheCredential(String username, String clientID, IdentityClientOptions identityClientOptions) { this.configuration = Configuration.getGlobalConfiguration().clone(); if (username == null) { this.username = configuration.get(Configuration.PROPERTY_AZURE_USERNAME); } else { this.username = username; } this.clientID = clientID; } /** * Gets token from shared token cache * */ @Override }
Added a better error message.
/**
 * Gets a token for the requested scopes from the shared token cache.
 *
 * @param request the details of the token request, including the scopes to acquire
 * @return a Mono that emits the cached account's access token, or an error when no
 *     matching account (or more than one) is found in the shared token cache
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    // Lazily build the MSAL public client backed by the persistent shared token cache.
    if (pubClient == null) {
        try {
            PersistentTokenCacheAccessAspect accessAspect = new PersistentTokenCacheAccessAspect();
            pubClient = PublicClientApplication.builder(this.clientID)
                .setTokenCacheAccessAspect(accessAspect)
                .build();
        } catch (Exception e) {
            return Mono.error(e);
        }
    }
    return Mono.fromFuture(pubClient.getAccounts())
        .flatMap(set -> {
            // De-duplicate cached accounts by home account id, keeping only the ones that
            // match the configured username (or every account when no username was set).
            Map<String, IAccount> accounts = new HashMap<>();
            for (IAccount cached : set) {
                if (username == null || username.equals(cached.username())) {
                    accounts.putIfAbsent(cached.homeAccountId(), cached);
                }
            }
            if (accounts.isEmpty()) {
                if (username == null) {
                    // The cache itself is empty; no username filtering was applied.
                    return Mono.error(new RuntimeException("No accounts were discovered in the shared token cache."
                        + " To fix, authenticate through tooling supporting azure developer sign on."));
                }
                // List the usernames actually present in the (unfiltered) cache so a typo in
                // the requested username is easy to spot; the filtered map is empty here.
                StringBuilder discovered = new StringBuilder();
                for (IAccount cached : set) {
                    if (discovered.length() > 0) {
                        discovered.append(", ");
                    }
                    discovered.append(cached.username());
                }
                return Mono.error(new RuntimeException(String.format("User account '%s' was not found in the "
                    + "shared token cache. Discovered Accounts: [ '%s' ]", username, discovered)));
            } else if (accounts.size() > 1) {
                if (username == null) {
                    return Mono.error(new RuntimeException("Multiple accounts were discovered in the shared token "
                        + "cache. To fix, set the AZURE_USERNAME environment variable to the preferred username, "
                        + "or specify it when constructing SharedTokenCacheCredential."));
                }
                return Mono.error(new RuntimeException("Multiple entries for the user account " + username
                    + " were found in the shared token cache. This is not currently supported by the"
                    + " SharedTokenCacheCredential."));
            }
            IAccount requestedAccount = accounts.values().iterator().next();
            SilentParameters params = SilentParameters.builder(
                new HashSet<>(request.getScopes()), requestedAccount).build();
            try {
                CompletableFuture<IAuthenticationResult> future = pubClient.acquireTokenSilently(params);
                return Mono.fromFuture(() -> future).map(result -> new AccessToken(result.accessToken(),
                    result.expiresOnDate().toInstant().atOffset(ZoneOffset.UTC)));
            } catch (MalformedURLException e) {
                // Preserve the cause instead of printing the stack trace and discarding it.
                return Mono.error(new RuntimeException("Token was not found", e));
            }
        });
}
return Mono.error(new RuntimeException("Requested account was not found"));
public Mono<AccessToken> getToken(TokenRequestContext request) { if (pubClient == null) { try { PersistentTokenCacheAccessAspect accessAspect = new PersistentTokenCacheAccessAspect(); pubClient = PublicClientApplication.builder(this.clientID) .setTokenCacheAccessAspect(accessAspect) .build(); } catch (Exception e) { return Mono.error(e); } } return Mono.fromFuture(pubClient.getAccounts()) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.size() == 0) { if (username == null) { return Mono.error(new RuntimeException("No accounts were discovered in the shared token cache." + " To fix, authenticate through tooling supporting azure developer sign on.")); } else { return Mono.error(new RuntimeException(String.format("User account '%s' was not found in the " + "shared token cache. Discovered Accounts: [ '%s' ]", username,accounts.values().stream() .map(IAccount::username).collect(Collectors.joining(", "))))); } } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("Multiple accounts were discovered in the shared token " + "cache. To fix, set the AZURE_USERNAME environment variable to the preferred username, " + "or specify it when constructing SharedTokenCacheCredential.")); } else { return Mono.error(new RuntimeException("Multiple entries for the user account " + username + " were found in the shared token cache. 
This is not currently supported by the" + " SharedTokenCacheCredential.")); } } else { requestedAccount = accounts.values().iterator().next(); } SilentParameters params = SilentParameters.builder( new HashSet<>(request.getScopes()), requestedAccount).build(); CompletableFuture<IAuthenticationResult> future; try { future = pubClient.acquireTokenSilently(params); return Mono.fromFuture(() -> future).map(result -> new AccessToken(result.accessToken(), result.expiresOnDate().toInstant().atOffset(ZoneOffset.UTC))); } catch (MalformedURLException e) { e.printStackTrace(); return Mono.error(new RuntimeException("Token was not found")); } }); }
class SharedTokenCacheCredential implements TokenCredential { private final String username; private final String clientID; private final Configuration configuration; private PublicClientApplication pubClient; /** * Creates an instance of the Shared Token Cache Credential Provider. * * @param username the username of the account for the application * @param clientID the client ID of the application * @param identityClientOptions the options for configuring the identity client */ SharedTokenCacheCredential(String username, String clientID, IdentityClientOptions identityClientOptions) { this.configuration = Configuration.getGlobalConfiguration().clone(); if (username == null) { this.username = configuration.get(Configuration.PROPERTY_AZURE_USERNAME); } else { this.username = username; } this.clientID = clientID; } /** * Gets token from shared token cache * */ @Override }
class SharedTokenCacheCredential implements TokenCredential { private final String username; private final String clientID; private final Configuration configuration; private PublicClientApplication pubClient; /** * Creates an instance of the Shared Token Cache Credential Provider. * * @param username the username of the account for the application * @param clientID the client ID of the application * @param identityClientOptions the options for configuring the identity client */ SharedTokenCacheCredential(String username, String clientID, IdentityClientOptions identityClientOptions) { this.configuration = Configuration.getGlobalConfiguration().clone(); if (username == null) { this.username = configuration.get(Configuration.PROPERTY_AZURE_USERNAME); } else { this.username = username; } this.clientID = clientID; } /** * Gets token from shared token cache * */ @Override }
Added a better error message.
/**
 * Gets a token for the requested scopes from the shared token cache.
 *
 * @param request the details of the token request, including the scopes to acquire
 * @return a Mono that emits the cached account's access token, or an error when no
 *     matching account (or more than one) is found in the shared token cache
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    // Lazily build the MSAL public client backed by the persistent shared token cache.
    if (pubClient == null) {
        try {
            PersistentTokenCacheAccessAspect accessAspect = new PersistentTokenCacheAccessAspect();
            pubClient = PublicClientApplication.builder(this.clientID)
                .setTokenCacheAccessAspect(accessAspect)
                .build();
        } catch (Exception e) {
            return Mono.error(e);
        }
    }
    return Mono.fromFuture(pubClient.getAccounts())
        .flatMap(set -> {
            // De-duplicate cached accounts by home account id, keeping only the ones that
            // match the configured username (or every account when no username was set).
            Map<String, IAccount> accounts = new HashMap<>();
            for (IAccount cached : set) {
                if (username == null || username.equals(cached.username())) {
                    accounts.putIfAbsent(cached.homeAccountId(), cached);
                }
            }
            if (accounts.isEmpty()) {
                if (username == null) {
                    // The cache itself is empty; no username filtering was applied.
                    return Mono.error(new RuntimeException("No accounts were discovered in the shared token cache."
                        + " To fix, authenticate through tooling supporting azure developer sign on."));
                }
                // List the usernames actually present in the (unfiltered) cache so a typo in
                // the requested username is easy to spot; the filtered map is empty here.
                StringBuilder discovered = new StringBuilder();
                for (IAccount cached : set) {
                    if (discovered.length() > 0) {
                        discovered.append(", ");
                    }
                    discovered.append(cached.username());
                }
                return Mono.error(new RuntimeException(String.format("User account '%s' was not found in the "
                    + "shared token cache. Discovered Accounts: [ '%s' ]", username, discovered)));
            } else if (accounts.size() > 1) {
                if (username == null) {
                    return Mono.error(new RuntimeException("Multiple accounts were discovered in the shared token "
                        + "cache. To fix, set the AZURE_USERNAME environment variable to the preferred username, "
                        + "or specify it when constructing SharedTokenCacheCredential."));
                }
                return Mono.error(new RuntimeException("Multiple entries for the user account " + username
                    + " were found in the shared token cache. This is not currently supported by the"
                    + " SharedTokenCacheCredential."));
            }
            IAccount requestedAccount = accounts.values().iterator().next();
            SilentParameters params = SilentParameters.builder(
                new HashSet<>(request.getScopes()), requestedAccount).build();
            try {
                CompletableFuture<IAuthenticationResult> future = pubClient.acquireTokenSilently(params);
                return Mono.fromFuture(() -> future).map(result -> new AccessToken(result.accessToken(),
                    result.expiresOnDate().toInstant().atOffset(ZoneOffset.UTC)));
            } catch (MalformedURLException e) {
                // Preserve the cause instead of printing the stack trace and discarding it.
                return Mono.error(new RuntimeException("Token was not found", e));
            }
        });
}
+ " SharedTokenCacheCredential."));
public Mono<AccessToken> getToken(TokenRequestContext request) { if (pubClient == null) { try { PersistentTokenCacheAccessAspect accessAspect = new PersistentTokenCacheAccessAspect(); pubClient = PublicClientApplication.builder(this.clientID) .setTokenCacheAccessAspect(accessAspect) .build(); } catch (Exception e) { return Mono.error(e); } } return Mono.fromFuture(pubClient.getAccounts()) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.size() == 0) { if (username == null) { return Mono.error(new RuntimeException("No accounts were discovered in the shared token cache." + " To fix, authenticate through tooling supporting azure developer sign on.")); } else { return Mono.error(new RuntimeException(String.format("User account '%s' was not found in the " + "shared token cache. Discovered Accounts: [ '%s' ]", username,accounts.values().stream() .map(IAccount::username).collect(Collectors.joining(", "))))); } } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("Multiple accounts were discovered in the shared token " + "cache. To fix, set the AZURE_USERNAME environment variable to the preferred username, " + "or specify it when constructing SharedTokenCacheCredential.")); } else { return Mono.error(new RuntimeException("Multiple entries for the user account " + username + " were found in the shared token cache. 
This is not currently supported by the" + " SharedTokenCacheCredential.")); } } else { requestedAccount = accounts.values().iterator().next(); } SilentParameters params = SilentParameters.builder( new HashSet<>(request.getScopes()), requestedAccount).build(); CompletableFuture<IAuthenticationResult> future; try { future = pubClient.acquireTokenSilently(params); return Mono.fromFuture(() -> future).map(result -> new AccessToken(result.accessToken(), result.expiresOnDate().toInstant().atOffset(ZoneOffset.UTC))); } catch (MalformedURLException e) { e.printStackTrace(); return Mono.error(new RuntimeException("Token was not found")); } }); }
class SharedTokenCacheCredential implements TokenCredential { private final String username; private final String clientID; private final Configuration configuration; private PublicClientApplication pubClient; /** * Creates an instance of the Shared Token Cache Credential Provider. * * @param username the username of the account for the application * @param clientID the client ID of the application * @param identityClientOptions the options for configuring the identity client */ SharedTokenCacheCredential(String username, String clientID, IdentityClientOptions identityClientOptions) { this.configuration = Configuration.getGlobalConfiguration().clone(); if (username == null) { this.username = configuration.get(Configuration.PROPERTY_AZURE_USERNAME); } else { this.username = username; } this.clientID = clientID; } /** * Gets token from shared token cache * */ @Override }
class SharedTokenCacheCredential implements TokenCredential { private final String username; private final String clientID; private final Configuration configuration; private PublicClientApplication pubClient; /** * Creates an instance of the Shared Token Cache Credential Provider. * * @param username the username of the account for the application * @param clientID the client ID of the application * @param identityClientOptions the options for configuring the identity client */ SharedTokenCacheCredential(String username, String clientID, IdentityClientOptions identityClientOptions) { this.configuration = Configuration.getGlobalConfiguration().clone(); if (username == null) { this.username = configuration.get(Configuration.PROPERTY_AZURE_USERNAME); } else { this.username = username; } this.clientID = clientID; } /** * Gets token from shared token cache * */ @Override }
FYI, `AccessModifier` could be another way to determine the member's access modifier: ``` final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersAST); ``` and ``` accessModifier.equals(AccessModifier.PUBLIC) ```
/**
 * Should we check a member with the given modifiers?
 *
 * @param token the definition token whose MODIFIERS child is inspected
 * @return true when the member is public or protected and not static
 */
private boolean isPublicApi(DetailAST token) {
    final DetailAST modifiersAST = token.findFirstToken(TokenTypes.MODIFIERS);
    // Static members are never considered part of the public API here.
    if (modifiersAST.findFirstToken(TokenTypes.LITERAL_STATIC) != null) {
        return false;
    }
    return modifiersAST.findFirstToken(TokenTypes.LITERAL_PUBLIC) != null
        || modifiersAST.findFirstToken(TokenTypes.LITERAL_PROTECTED) != null;
}
final boolean isPublic = modifiersAST
/**
 * Should we check a member with the given modifiers?
 *
 * @param token the definition token whose MODIFIERS child is inspected
 * @return true when the member is public or protected and not static
 */
private boolean isPublicApi(DetailAST token) {
    final DetailAST modifiersAST = token.findFirstToken(TokenTypes.MODIFIERS);
    final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersAST);
    final boolean isStatic = modifiersAST.findFirstToken(TokenTypes.LITERAL_STATIC) != null;
    // Enum constants are singletons: compare with == (also null-safe) rather than equals().
    return !isStatic
        && (accessModifier == AccessModifier.PUBLIC || accessModifier == AccessModifier.PROTECTED);
}
class BlacklistedWordsCheck extends AbstractCheck { private final Set<String> blacklistedWords = new HashSet<>(Arrays.asList()); private final String ERROR_MESSAGE = "%s, All Public API Classes, Fields and Methods should follow " + "Camelcase standards for the following words: %s."; /** * Adds words that Classes, Methods and Variables that should follow Camelcasing standards * @param blacklistedWords words that should follow normal Camelcasing standards */ public final void setBlacklistedWords(String... blacklistedWords) { if (blacklistedWords != null) { Collections.addAll(this.blacklistedWords, blacklistedWords); } } @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] {TokenTypes.CLASS_DEF, TokenTypes.METHOD_DEF, TokenTypes.VARIABLE_DEF}; } @Override public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: case TokenTypes.METHOD_DEF: case TokenTypes.VARIABLE_DEF: if (isPublicApi(token)) { String tokenName = token.findFirstToken(TokenTypes.IDENT).getText(); if (hasBlacklistedWords(tokenName)) { log(token, String.format(ERROR_MESSAGE, tokenName, this.blacklistedWords.stream().collect(Collectors.joining(", ", "", "")))); } } break; default: break; } } /** * Should we check member with given modifiers. * * @param token * modifiers of member to check. * @return true if we should check such member. */ /** * Gets the disallowed abbreviation contained in given String. * @param tokenName * the given String. * @return the disallowed abbreviation contained in given String as a * separate String. */ private boolean hasBlacklistedWords(String tokenName) { boolean result = false; for (String blacklistedWord : blacklistedWords) { if (tokenName.contains(blacklistedWord)) { result = true; break; } } return result; } }
class BlacklistedWordsCheck extends AbstractCheck { private final Set<String> blacklistedWords = new HashSet<>(Arrays.asList()); private final String ERROR_MESSAGE = "%s, All Public API Classes, Fields and Methods should follow " + "Camelcase standards for the following words: %s."; /** * Adds words that Classes, Methods and Variables that should follow Camelcasing standards * @param blacklistedWords words that should follow normal Camelcasing standards */ public final void setBlacklistedWords(String... blacklistedWords) { if (blacklistedWords != null) { Collections.addAll(this.blacklistedWords, blacklistedWords); } } @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] {TokenTypes.CLASS_DEF, TokenTypes.METHOD_DEF, TokenTypes.VARIABLE_DEF}; } @Override public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: case TokenTypes.METHOD_DEF: case TokenTypes.VARIABLE_DEF: if (isPublicApi(token)) { String tokenName = token.findFirstToken(TokenTypes.IDENT).getText(); if (hasBlacklistedWords(tokenName)) { log(token, String.format(ERROR_MESSAGE, tokenName, this.blacklistedWords.stream().collect(Collectors.joining(", ", "", "")))); } } break; default: break; } } /** * Should we check member with given modifiers. * * @param token modifiers of member to check. * @return true if we should check such member. */ /** * Gets the disallowed abbreviation contained in given String. * @param tokenName the given String. * @return the disallowed abbreviation contained in given String as a * separate String. */ private boolean hasBlacklistedWords(String tokenName) { for (String blacklistedWord : blacklistedWords) { if (tokenName.contains(blacklistedWord)) { return true; } } return false; } }
You can also return true here directly and return false at the end of the method, instead of tracking a result flag.
/**
 * Checks whether the given identifier contains any blacklisted word.
 *
 * @param tokenName the identifier text to check
 * @return true if the identifier contains at least one blacklisted word
 */
private boolean hasBlacklistedWords(String tokenName) {
    for (String blacklistedWord : blacklistedWords) {
        if (tokenName.contains(blacklistedWord)) {
            // Early return replaces the previous flag-variable-and-break pattern.
            return true;
        }
    }
    return false;
}
result = true;
/**
 * Checks whether the given identifier contains any blacklisted word.
 *
 * @param tokenName the identifier text to check
 * @return true if the identifier contains at least one blacklisted word
 */
private boolean hasBlacklistedWords(String tokenName) {
    // Short-circuits on the first match, just like the explicit loop it replaces.
    return blacklistedWords.stream().anyMatch(tokenName::contains);
}
class BlacklistedWordsCheck extends AbstractCheck { private final Set<String> blacklistedWords = new HashSet<>(Arrays.asList()); private final String ERROR_MESSAGE = "%s, All Public API Classes, Fields and Methods should follow " + "Camelcase standards for the following words: %s."; /** * Adds words that Classes, Methods and Variables that should follow Camelcasing standards * @param blacklistedWords words that should follow normal Camelcasing standards */ public final void setBlacklistedWords(String... blacklistedWords) { if (blacklistedWords != null) { Collections.addAll(this.blacklistedWords, blacklistedWords); } } @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] {TokenTypes.CLASS_DEF, TokenTypes.METHOD_DEF, TokenTypes.VARIABLE_DEF}; } @Override public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: case TokenTypes.METHOD_DEF: case TokenTypes.VARIABLE_DEF: if (isPublicApi(token)) { String tokenName = token.findFirstToken(TokenTypes.IDENT).getText(); if (hasBlacklistedWords(tokenName)) { log(token, String.format(ERROR_MESSAGE, tokenName, this.blacklistedWords.stream().collect(Collectors.joining(", ", "", "")))); } } break; default: break; } } /** * Should we check member with given modifiers. * * @param token * modifiers of member to check. * @return true if we should check such member. 
*/ private boolean isPublicApi(DetailAST token) { final DetailAST modifiersAST = token.findFirstToken(TokenTypes.MODIFIERS); final boolean isStatic = modifiersAST.findFirstToken(TokenTypes.LITERAL_STATIC) != null; final boolean isPublic = modifiersAST .findFirstToken(TokenTypes.LITERAL_PUBLIC) != null; final boolean isProtected = modifiersAST.findFirstToken(TokenTypes.LITERAL_PROTECTED) != null; return (isPublic || isProtected) && !isStatic; } /** * Gets the disallowed abbreviation contained in given String. * @param tokenName * the given String. * @return the disallowed abbreviation contained in given String as a * separate String. */ }
class BlacklistedWordsCheck extends AbstractCheck { private final Set<String> blacklistedWords = new HashSet<>(Arrays.asList()); private final String ERROR_MESSAGE = "%s, All Public API Classes, Fields and Methods should follow " + "Camelcase standards for the following words: %s."; /** * Adds words that Classes, Methods and Variables that should follow Camelcasing standards * @param blacklistedWords words that should follow normal Camelcasing standards */ public final void setBlacklistedWords(String... blacklistedWords) { if (blacklistedWords != null) { Collections.addAll(this.blacklistedWords, blacklistedWords); } } @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] {TokenTypes.CLASS_DEF, TokenTypes.METHOD_DEF, TokenTypes.VARIABLE_DEF}; } @Override public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: case TokenTypes.METHOD_DEF: case TokenTypes.VARIABLE_DEF: if (isPublicApi(token)) { String tokenName = token.findFirstToken(TokenTypes.IDENT).getText(); if (hasBlacklistedWords(tokenName)) { log(token, String.format(ERROR_MESSAGE, tokenName, this.blacklistedWords.stream().collect(Collectors.joining(", ", "", "")))); } } break; default: break; } } /** * Should we check member with given modifiers. * * @param token modifiers of member to check. * @return true if we should check such member. 
*/ private boolean isPublicApi(DetailAST token) { final DetailAST modifiersAST = token.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersAST); final boolean isStatic = modifiersAST.findFirstToken(TokenTypes.LITERAL_STATIC) != null; return (accessModifier.equals(AccessModifier.PUBLIC) || accessModifier.equals(AccessModifier.PROTECTED)) && !isStatic; } /** * Gets the disallowed abbreviation contained in given String. * @param tokenName the given String. * @return the disallowed abbreviation contained in given String as a * separate String. */ }
should use `importKeyWithResponse` here.
public void importKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); JsonWebKey jsonWebKeyToImport = new JsonWebKey(); keyAsyncClient.importKey("keyName", jsonWebKeyToImport).subscribe(keyResponse -> System.out.printf("Key is imported with name %s and id %s \n", keyResponse.getName(), keyResponse.getId())); ImportKeyOptions options = new ImportKeyOptions("keyName", jsonWebKeyToImport) .setHardwareProtected(false); keyAsyncClient.importKey(options).subscribe(keyResponse -> System.out.printf("Key is imported with name %s and id %s \n", keyResponse.getName(), keyResponse.getId())); ImportKeyOptions importKeyOptions = new ImportKeyOptions("keyName", jsonWebKeyToImport) .setHardwareProtected(false); keyAsyncClient.importKey(importKeyOptions).subscribe(keyResponse -> System.out.printf("Key is imported with name %s and id %s \n", keyResponse.getName(), keyResponse.getId())); }
keyAsyncClient.importKey(importKeyOptions).subscribe(keyResponse ->
public void importKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); JsonWebKey jsonWebKeyToImport = new JsonWebKey(); keyAsyncClient.importKey("keyName", jsonWebKeyToImport).subscribe(keyResponse -> System.out.printf("Key is imported with name %s and id %s \n", keyResponse.getName(), keyResponse.getId())); ImportKeyOptions options = new ImportKeyOptions("keyName", jsonWebKeyToImport) .setHardwareProtected(false); keyAsyncClient.importKey(options).subscribe(keyResponse -> System.out.printf("Key is imported with name %s and id %s \n", keyResponse.getName(), keyResponse.getId())); ImportKeyOptions importKeyOptions = new ImportKeyOptions("keyName", jsonWebKeyToImport) .setHardwareProtected(false); keyAsyncClient.importKeyWithResponse(importKeyOptions).subscribe(keyResponse -> System.out.printf("Key is imported with name %s and id %s \n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); }
class KeyAsyncClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public KeyAsyncClient createAsyncClientWithHttpClient() { KeyAsyncClient keyAsyncClient = new KeyClientBuilder() .vaultUrl("https: .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .addPolicy(new KeyVaultCredentialPolicy(new DefaultAzureCredentialBuilder().build())) .httpClient(HttpClient.createDefault()) .buildAsyncClient(); return keyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public KeyAsyncClient createAsyncClient() { KeyAsyncClient keyAsyncClient = new KeyClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); return keyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public KeyAsyncClient createAsyncClientWithPipeline() { HttpPipeline pipeline = new HttpPipelineBuilder() .policies(new KeyVaultCredentialPolicy(new DefaultAzureCredentialBuilder().build()), new RetryPolicy()) .build(); KeyAsyncClient keyAsyncClient = new KeyClientBuilder() .pipeline(pipeline) .vaultUrl("https: .buildAsyncClient(); return keyAsyncClient; } /** * Generates a code sample for using {@link KeyAsyncClient */ public void createKey() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.createKey("keyName", KeyType.EC) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA) .setNotBefore(OffsetDateTime.now().plusDays(1)) 
.setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createKey(createKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName") .setKeySize(2048) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createRsaKey(createRsaKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName") .setCurveName(KeyCurveName.P_384) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createEcKey(createEcKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void deleteKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.beginDeleteKey("keyName") .subscribe(pollResponse -> { System.out.println("Delete Status: " + pollResponse.getStatus().toString()); System.out.println("Delete Key Name: " + pollResponse.getValue().getName()); System.out.println("Key Delete Date: " + pollResponse.getValue().getDeletedOn().toString()); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ /** * Generates a code sample for using {@link KeyAsyncClient */ public void getDeletedKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getDeletedKey("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> 
System.out.printf("Deleted Key's Recovery Id %s", keyResponse.getRecoveryId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void createKeyWithResponses() { KeyAsyncClient keyAsyncClient = createAsyncClient(); CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createKeyWithResponse(createKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName") .setKeySize(2048) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createRsaKeyWithResponse(createRsaKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName") .setCurveName(KeyCurveName.P_384) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createEcKeyWithResponse(createEcKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void getKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; keyAsyncClient.getKeyWithResponse("keyName", keyVersion) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> 
System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void getKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; keyAsyncClient.getKey("keyName", keyVersion) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); keyAsyncClient.getKey("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void updateKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getKey("keyName") .subscribe(keyResponse -> { keyResponse.getProperties().setNotBefore(OffsetDateTime.now().plusDays(50)); keyAsyncClient.updateKeyPropertiesWithResponse(keyResponse.getProperties(), KeyOperation.ENCRYPT, KeyOperation.DECRYPT) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(updatedKeyResponse -> System.out.printf("Key's updated not before time %s %n", updatedKeyResponse.getValue().getProperties().getNotBefore().toString())); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void updateKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getKey("keyName") .subscribe(keyResponse -> { keyResponse.getProperties().setNotBefore(OffsetDateTime.now().plusDays(50)); keyAsyncClient.updateKeyProperties(keyResponse.getProperties(), KeyOperation.ENCRYPT, KeyOperation.DECRYPT) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(updatedKeyResponse -> System.out.printf("Key's updated not before time %s 
%n", updatedKeyResponse.getProperties().getNotBefore().toString())); }); keyAsyncClient.getKey("keyName") .subscribe(keyResponse -> { keyResponse.getProperties().setNotBefore(OffsetDateTime.now().plusDays(50)); keyAsyncClient.updateKeyProperties(keyResponse.getProperties()) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(updatedKeyResponse -> System.out.printf("Key's updated not before time %s %n", updatedKeyResponse.getProperties().getNotBefore().toString())); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void getDeleteKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getDeletedKeyWithResponse("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(deletedKeyResponse -> System.out.printf("Deleted Key's Recovery Id %s", deletedKeyResponse.getValue().getRecoveryId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void purgeDeletedKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.purgeDeletedKey("deletedKeyName") .subscribe(purgeResponse -> System.out.println("Successfully Purged deleted Key")); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void purgeDeletedKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.purgeDeletedKeyWithResponse("deletedKeyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(purgeResponse -> System.out.printf("Purge Status response %d %n", purgeResponse.getStatusCode())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void recoverDeletedKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.beginRecoverDeletedKey("deletedKeyName") .subscribe(pollResponse -> { System.out.println("Recovery Status: " + pollResponse.getStatus().toString()); System.out.println("Recover Key Name: " + 
pollResponse.getValue().getName()); System.out.println("Recover Key Type: " + pollResponse.getValue().getKeyType()); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void backupKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.backupKey("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyBackupResponse -> System.out.printf("Key's Backup Byte array's length %s %n", keyBackupResponse.length)); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void backupKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.backupKeyWithResponse("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyBackupResponse -> System.out.printf("Key's Backup Byte array's length %s %n", keyBackupResponse.getValue().length)); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void restoreKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); byte[] keyBackupByteArray = {}; keyAsyncClient.restoreKeyBackup(keyBackupByteArray) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Restored Key with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void restoreKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); byte[] keyBackupByteArray = {}; keyAsyncClient.restoreKeyBackupWithResponse(keyBackupByteArray) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Restored Key with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void listKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.listPropertiesOfKeys() 
.subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyProperties -> keyAsyncClient.getKey(keyProperties.getName(), keyProperties.getVersion()) .subscribe(keyResponse -> System.out.printf("Received key with name %s and type %s", keyResponse.getName(), keyResponse.getKeyType()))); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void listDeletedKeysSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.listDeletedKeys() .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(deletedKey -> System.out.printf("Deleted key's recovery Id %s", deletedKey.getRecoveryId())); } /** * Generates code sample for using {@link KeyAsyncClient */ public void listKeyVersions() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.listPropertiesOfKeyVersions("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyProperties -> keyAsyncClient.getKey(keyProperties.getName(), keyProperties.getVersion()) .subscribe(keyResponse -> System.out.printf("Received key's version with name %s, type %s and version %s", keyResponse.getName(), keyResponse.getKeyType(), keyResponse.getProperties().getVersion()))); } /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
class KeyAsyncClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public KeyAsyncClient createAsyncClientWithHttpClient() { KeyAsyncClient keyAsyncClient = new KeyClientBuilder() .vaultUrl("https: .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .addPolicy(new KeyVaultCredentialPolicy(new DefaultAzureCredentialBuilder().build())) .httpClient(HttpClient.createDefault()) .buildAsyncClient(); return keyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public KeyAsyncClient createAsyncClient() { KeyAsyncClient keyAsyncClient = new KeyClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); return keyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public KeyAsyncClient createAsyncClientWithPipeline() { HttpPipeline pipeline = new HttpPipelineBuilder() .policies(new KeyVaultCredentialPolicy(new DefaultAzureCredentialBuilder().build()), new RetryPolicy()) .build(); KeyAsyncClient keyAsyncClient = new KeyClientBuilder() .pipeline(pipeline) .vaultUrl("https: .buildAsyncClient(); return keyAsyncClient; } /** * Generates a code sample for using {@link KeyAsyncClient */ public void createKey() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.createKey("keyName", KeyType.EC) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA) .setNotBefore(OffsetDateTime.now().plusDays(1)) 
.setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createKey(createKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName") .setKeySize(2048) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createRsaKey(createRsaKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName") .setCurveName(KeyCurveName.P_384) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createEcKey(createEcKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void deleteKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.beginDeleteKey("keyName") .subscribe(pollResponse -> { System.out.println("Delete Status: " + pollResponse.getStatus().toString()); System.out.println("Delete Key Name: " + pollResponse.getValue().getName()); System.out.println("Key Delete Date: " + pollResponse.getValue().getDeletedOn().toString()); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ /** * Generates a code sample for using {@link KeyAsyncClient */ public void getDeletedKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getDeletedKey("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> 
System.out.printf("Deleted Key's Recovery Id %s", keyResponse.getRecoveryId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void createKeyWithResponses() { KeyAsyncClient keyAsyncClient = createAsyncClient(); CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createKeyWithResponse(createKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName") .setKeySize(2048) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createRsaKeyWithResponse(createRsaKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName") .setCurveName(KeyCurveName.P_384) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createEcKeyWithResponse(createEcKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void getKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; keyAsyncClient.getKeyWithResponse("keyName", keyVersion) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> 
System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void getKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; keyAsyncClient.getKey("keyName", keyVersion) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); keyAsyncClient.getKey("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void updateKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getKey("keyName") .subscribe(keyResponse -> { keyResponse.getProperties().setNotBefore(OffsetDateTime.now().plusDays(50)); keyAsyncClient.updateKeyPropertiesWithResponse(keyResponse.getProperties(), KeyOperation.ENCRYPT, KeyOperation.DECRYPT) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(updatedKeyResponse -> System.out.printf("Key's updated not before time %s %n", updatedKeyResponse.getValue().getProperties().getNotBefore().toString())); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void updateKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getKey("keyName") .subscribe(keyResponse -> { keyResponse.getProperties().setNotBefore(OffsetDateTime.now().plusDays(50)); keyAsyncClient.updateKeyProperties(keyResponse.getProperties(), KeyOperation.ENCRYPT, KeyOperation.DECRYPT) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(updatedKeyResponse -> System.out.printf("Key's updated not before time %s 
%n", updatedKeyResponse.getProperties().getNotBefore().toString())); }); keyAsyncClient.getKey("keyName") .subscribe(keyResponse -> { keyResponse.getProperties().setNotBefore(OffsetDateTime.now().plusDays(50)); keyAsyncClient.updateKeyProperties(keyResponse.getProperties()) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(updatedKeyResponse -> System.out.printf("Key's updated not before time %s %n", updatedKeyResponse.getProperties().getNotBefore().toString())); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void getDeleteKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getDeletedKeyWithResponse("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(deletedKeyResponse -> System.out.printf("Deleted Key's Recovery Id %s", deletedKeyResponse.getValue().getRecoveryId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void purgeDeletedKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.purgeDeletedKey("deletedKeyName") .subscribe(purgeResponse -> System.out.println("Successfully Purged deleted Key")); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void purgeDeletedKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.purgeDeletedKeyWithResponse("deletedKeyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(purgeResponse -> System.out.printf("Purge Status response %d %n", purgeResponse.getStatusCode())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void recoverDeletedKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.beginRecoverDeletedKey("deletedKeyName") .subscribe(pollResponse -> { System.out.println("Recovery Status: " + pollResponse.getStatus().toString()); System.out.println("Recover Key Name: " + 
pollResponse.getValue().getName()); System.out.println("Recover Key Type: " + pollResponse.getValue().getKeyType()); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void backupKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.backupKey("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyBackupResponse -> System.out.printf("Key's Backup Byte array's length %s %n", keyBackupResponse.length)); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void backupKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.backupKeyWithResponse("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyBackupResponse -> System.out.printf("Key's Backup Byte array's length %s %n", keyBackupResponse.getValue().length)); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void restoreKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); byte[] keyBackupByteArray = {}; keyAsyncClient.restoreKeyBackup(keyBackupByteArray) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Restored Key with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void restoreKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); byte[] keyBackupByteArray = {}; keyAsyncClient.restoreKeyBackupWithResponse(keyBackupByteArray) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Restored Key with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void listKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.listPropertiesOfKeys() 
.subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyProperties -> keyAsyncClient.getKey(keyProperties.getName(), keyProperties.getVersion()) .subscribe(keyResponse -> System.out.printf("Received key with name %s and type %s", keyResponse.getName(), keyResponse.getKeyType()))); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void listDeletedKeysSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.listDeletedKeys() .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(deletedKey -> System.out.printf("Deleted key's recovery Id %s", deletedKey.getRecoveryId())); } /** * Generates code sample for using {@link KeyAsyncClient */ public void listKeyVersions() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.listPropertiesOfKeyVersions("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyProperties -> keyAsyncClient.getKey(keyProperties.getName(), keyProperties.getVersion()) .subscribe(keyResponse -> System.out.printf("Received key's version with name %s, type %s and version %s", keyResponse.getName(), keyResponse.getKeyType(), keyResponse.getProperties().getVersion()))); } /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
good catch, updated.
public void importKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); JsonWebKey jsonWebKeyToImport = new JsonWebKey(); keyAsyncClient.importKey("keyName", jsonWebKeyToImport).subscribe(keyResponse -> System.out.printf("Key is imported with name %s and id %s \n", keyResponse.getName(), keyResponse.getId())); ImportKeyOptions options = new ImportKeyOptions("keyName", jsonWebKeyToImport) .setHardwareProtected(false); keyAsyncClient.importKey(options).subscribe(keyResponse -> System.out.printf("Key is imported with name %s and id %s \n", keyResponse.getName(), keyResponse.getId())); ImportKeyOptions importKeyOptions = new ImportKeyOptions("keyName", jsonWebKeyToImport) .setHardwareProtected(false); keyAsyncClient.importKey(importKeyOptions).subscribe(keyResponse -> System.out.printf("Key is imported with name %s and id %s \n", keyResponse.getName(), keyResponse.getId())); }
keyAsyncClient.importKey(importKeyOptions).subscribe(keyResponse ->
public void importKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); JsonWebKey jsonWebKeyToImport = new JsonWebKey(); keyAsyncClient.importKey("keyName", jsonWebKeyToImport).subscribe(keyResponse -> System.out.printf("Key is imported with name %s and id %s \n", keyResponse.getName(), keyResponse.getId())); ImportKeyOptions options = new ImportKeyOptions("keyName", jsonWebKeyToImport) .setHardwareProtected(false); keyAsyncClient.importKey(options).subscribe(keyResponse -> System.out.printf("Key is imported with name %s and id %s \n", keyResponse.getName(), keyResponse.getId())); ImportKeyOptions importKeyOptions = new ImportKeyOptions("keyName", jsonWebKeyToImport) .setHardwareProtected(false); keyAsyncClient.importKeyWithResponse(importKeyOptions).subscribe(keyResponse -> System.out.printf("Key is imported with name %s and id %s \n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); }
class KeyAsyncClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public KeyAsyncClient createAsyncClientWithHttpClient() { KeyAsyncClient keyAsyncClient = new KeyClientBuilder() .vaultUrl("https: .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .addPolicy(new KeyVaultCredentialPolicy(new DefaultAzureCredentialBuilder().build())) .httpClient(HttpClient.createDefault()) .buildAsyncClient(); return keyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public KeyAsyncClient createAsyncClient() { KeyAsyncClient keyAsyncClient = new KeyClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); return keyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public KeyAsyncClient createAsyncClientWithPipeline() { HttpPipeline pipeline = new HttpPipelineBuilder() .policies(new KeyVaultCredentialPolicy(new DefaultAzureCredentialBuilder().build()), new RetryPolicy()) .build(); KeyAsyncClient keyAsyncClient = new KeyClientBuilder() .pipeline(pipeline) .vaultUrl("https: .buildAsyncClient(); return keyAsyncClient; } /** * Generates a code sample for using {@link KeyAsyncClient */ public void createKey() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.createKey("keyName", KeyType.EC) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA) .setNotBefore(OffsetDateTime.now().plusDays(1)) 
.setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createKey(createKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName") .setKeySize(2048) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createRsaKey(createRsaKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName") .setCurveName(KeyCurveName.P_384) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createEcKey(createEcKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void deleteKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.beginDeleteKey("keyName") .subscribe(pollResponse -> { System.out.println("Delete Status: " + pollResponse.getStatus().toString()); System.out.println("Delete Key Name: " + pollResponse.getValue().getName()); System.out.println("Key Delete Date: " + pollResponse.getValue().getDeletedOn().toString()); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ /** * Generates a code sample for using {@link KeyAsyncClient */ public void getDeletedKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getDeletedKey("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> 
System.out.printf("Deleted Key's Recovery Id %s", keyResponse.getRecoveryId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void createKeyWithResponses() { KeyAsyncClient keyAsyncClient = createAsyncClient(); CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createKeyWithResponse(createKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName") .setKeySize(2048) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createRsaKeyWithResponse(createRsaKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName") .setCurveName(KeyCurveName.P_384) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createEcKeyWithResponse(createEcKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void getKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; keyAsyncClient.getKeyWithResponse("keyName", keyVersion) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> 
System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void getKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; keyAsyncClient.getKey("keyName", keyVersion) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); keyAsyncClient.getKey("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void updateKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getKey("keyName") .subscribe(keyResponse -> { keyResponse.getProperties().setNotBefore(OffsetDateTime.now().plusDays(50)); keyAsyncClient.updateKeyPropertiesWithResponse(keyResponse.getProperties(), KeyOperation.ENCRYPT, KeyOperation.DECRYPT) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(updatedKeyResponse -> System.out.printf("Key's updated not before time %s %n", updatedKeyResponse.getValue().getProperties().getNotBefore().toString())); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void updateKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getKey("keyName") .subscribe(keyResponse -> { keyResponse.getProperties().setNotBefore(OffsetDateTime.now().plusDays(50)); keyAsyncClient.updateKeyProperties(keyResponse.getProperties(), KeyOperation.ENCRYPT, KeyOperation.DECRYPT) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(updatedKeyResponse -> System.out.printf("Key's updated not before time %s 
%n", updatedKeyResponse.getProperties().getNotBefore().toString())); }); keyAsyncClient.getKey("keyName") .subscribe(keyResponse -> { keyResponse.getProperties().setNotBefore(OffsetDateTime.now().plusDays(50)); keyAsyncClient.updateKeyProperties(keyResponse.getProperties()) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(updatedKeyResponse -> System.out.printf("Key's updated not before time %s %n", updatedKeyResponse.getProperties().getNotBefore().toString())); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void getDeleteKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getDeletedKeyWithResponse("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(deletedKeyResponse -> System.out.printf("Deleted Key's Recovery Id %s", deletedKeyResponse.getValue().getRecoveryId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void purgeDeletedKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.purgeDeletedKey("deletedKeyName") .subscribe(purgeResponse -> System.out.println("Successfully Purged deleted Key")); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void purgeDeletedKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.purgeDeletedKeyWithResponse("deletedKeyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(purgeResponse -> System.out.printf("Purge Status response %d %n", purgeResponse.getStatusCode())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void recoverDeletedKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.beginRecoverDeletedKey("deletedKeyName") .subscribe(pollResponse -> { System.out.println("Recovery Status: " + pollResponse.getStatus().toString()); System.out.println("Recover Key Name: " + 
pollResponse.getValue().getName()); System.out.println("Recover Key Type: " + pollResponse.getValue().getKeyType()); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void backupKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.backupKey("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyBackupResponse -> System.out.printf("Key's Backup Byte array's length %s %n", keyBackupResponse.length)); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void backupKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.backupKeyWithResponse("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyBackupResponse -> System.out.printf("Key's Backup Byte array's length %s %n", keyBackupResponse.getValue().length)); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void restoreKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); byte[] keyBackupByteArray = {}; keyAsyncClient.restoreKeyBackup(keyBackupByteArray) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Restored Key with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void restoreKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); byte[] keyBackupByteArray = {}; keyAsyncClient.restoreKeyBackupWithResponse(keyBackupByteArray) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Restored Key with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void listKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.listPropertiesOfKeys() 
.subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyProperties -> keyAsyncClient.getKey(keyProperties.getName(), keyProperties.getVersion()) .subscribe(keyResponse -> System.out.printf("Received key with name %s and type %s", keyResponse.getName(), keyResponse.getKeyType()))); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void listDeletedKeysSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.listDeletedKeys() .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(deletedKey -> System.out.printf("Deleted key's recovery Id %s", deletedKey.getRecoveryId())); } /** * Generates code sample for using {@link KeyAsyncClient */ public void listKeyVersions() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.listPropertiesOfKeyVersions("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyProperties -> keyAsyncClient.getKey(keyProperties.getName(), keyProperties.getVersion()) .subscribe(keyResponse -> System.out.printf("Received key's version with name %s, type %s and version %s", keyResponse.getName(), keyResponse.getKeyType(), keyResponse.getProperties().getVersion()))); } /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
class KeyAsyncClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public KeyAsyncClient createAsyncClientWithHttpClient() { KeyAsyncClient keyAsyncClient = new KeyClientBuilder() .vaultUrl("https: .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .addPolicy(new KeyVaultCredentialPolicy(new DefaultAzureCredentialBuilder().build())) .httpClient(HttpClient.createDefault()) .buildAsyncClient(); return keyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public KeyAsyncClient createAsyncClient() { KeyAsyncClient keyAsyncClient = new KeyClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); return keyAsyncClient; } /** * Generates code sample for creating a {@link KeyAsyncClient} * @return An instance of {@link KeyAsyncClient} */ public KeyAsyncClient createAsyncClientWithPipeline() { HttpPipeline pipeline = new HttpPipelineBuilder() .policies(new KeyVaultCredentialPolicy(new DefaultAzureCredentialBuilder().build()), new RetryPolicy()) .build(); KeyAsyncClient keyAsyncClient = new KeyClientBuilder() .pipeline(pipeline) .vaultUrl("https: .buildAsyncClient(); return keyAsyncClient; } /** * Generates a code sample for using {@link KeyAsyncClient */ public void createKey() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.createKey("keyName", KeyType.EC) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA) .setNotBefore(OffsetDateTime.now().plusDays(1)) 
.setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createKey(createKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName") .setKeySize(2048) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createRsaKey(createRsaKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName") .setCurveName(KeyCurveName.P_384) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createEcKey(createEcKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void deleteKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.beginDeleteKey("keyName") .subscribe(pollResponse -> { System.out.println("Delete Status: " + pollResponse.getStatus().toString()); System.out.println("Delete Key Name: " + pollResponse.getValue().getName()); System.out.println("Key Delete Date: " + pollResponse.getValue().getDeletedOn().toString()); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ /** * Generates a code sample for using {@link KeyAsyncClient */ public void getDeletedKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getDeletedKey("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> 
System.out.printf("Deleted Key's Recovery Id %s", keyResponse.getRecoveryId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void createKeyWithResponses() { KeyAsyncClient keyAsyncClient = createAsyncClient(); CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createKeyWithResponse(createKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName") .setKeySize(2048) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createRsaKeyWithResponse(createRsaKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName") .setCurveName(KeyCurveName.P_384) .setNotBefore(OffsetDateTime.now().plusDays(1)) .setExpiresOn(OffsetDateTime.now().plusYears(1)); keyAsyncClient.createEcKeyWithResponse(createEcKeyOptions) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void getKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; keyAsyncClient.getKeyWithResponse("keyName", keyVersion) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> 
System.out.printf("Key is created with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void getKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; keyAsyncClient.getKey("keyName", keyVersion) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); keyAsyncClient.getKey("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Key is created with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void updateKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getKey("keyName") .subscribe(keyResponse -> { keyResponse.getProperties().setNotBefore(OffsetDateTime.now().plusDays(50)); keyAsyncClient.updateKeyPropertiesWithResponse(keyResponse.getProperties(), KeyOperation.ENCRYPT, KeyOperation.DECRYPT) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(updatedKeyResponse -> System.out.printf("Key's updated not before time %s %n", updatedKeyResponse.getValue().getProperties().getNotBefore().toString())); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void updateKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getKey("keyName") .subscribe(keyResponse -> { keyResponse.getProperties().setNotBefore(OffsetDateTime.now().plusDays(50)); keyAsyncClient.updateKeyProperties(keyResponse.getProperties(), KeyOperation.ENCRYPT, KeyOperation.DECRYPT) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(updatedKeyResponse -> System.out.printf("Key's updated not before time %s 
%n", updatedKeyResponse.getProperties().getNotBefore().toString())); }); keyAsyncClient.getKey("keyName") .subscribe(keyResponse -> { keyResponse.getProperties().setNotBefore(OffsetDateTime.now().plusDays(50)); keyAsyncClient.updateKeyProperties(keyResponse.getProperties()) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(updatedKeyResponse -> System.out.printf("Key's updated not before time %s %n", updatedKeyResponse.getProperties().getNotBefore().toString())); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void getDeleteKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.getDeletedKeyWithResponse("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(deletedKeyResponse -> System.out.printf("Deleted Key's Recovery Id %s", deletedKeyResponse.getValue().getRecoveryId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void purgeDeletedKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.purgeDeletedKey("deletedKeyName") .subscribe(purgeResponse -> System.out.println("Successfully Purged deleted Key")); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void purgeDeletedKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.purgeDeletedKeyWithResponse("deletedKeyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(purgeResponse -> System.out.printf("Purge Status response %d %n", purgeResponse.getStatusCode())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void recoverDeletedKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.beginRecoverDeletedKey("deletedKeyName") .subscribe(pollResponse -> { System.out.println("Recovery Status: " + pollResponse.getStatus().toString()); System.out.println("Recover Key Name: " + 
pollResponse.getValue().getName()); System.out.println("Recover Key Type: " + pollResponse.getValue().getKeyType()); }); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void backupKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.backupKey("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyBackupResponse -> System.out.printf("Key's Backup Byte array's length %s %n", keyBackupResponse.length)); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void backupKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.backupKeyWithResponse("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyBackupResponse -> System.out.printf("Key's Backup Byte array's length %s %n", keyBackupResponse.getValue().length)); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void restoreKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); byte[] keyBackupByteArray = {}; keyAsyncClient.restoreKeyBackup(keyBackupByteArray) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Restored Key with name %s and id %s %n", keyResponse.getName(), keyResponse.getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void restoreKeyWithResponseSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); byte[] keyBackupByteArray = {}; keyAsyncClient.restoreKeyBackupWithResponse(keyBackupByteArray) .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyResponse -> System.out.printf("Restored Key with name %s and id %s %n", keyResponse.getValue().getName(), keyResponse.getValue().getId())); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void listKeySnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.listPropertiesOfKeys() 
.subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyProperties -> keyAsyncClient.getKey(keyProperties.getName(), keyProperties.getVersion()) .subscribe(keyResponse -> System.out.printf("Received key with name %s and type %s", keyResponse.getName(), keyResponse.getKeyType()))); } /** * Generates a code sample for using {@link KeyAsyncClient */ public void listDeletedKeysSnippets() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.listDeletedKeys() .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(deletedKey -> System.out.printf("Deleted key's recovery Id %s", deletedKey.getRecoveryId())); } /** * Generates code sample for using {@link KeyAsyncClient */ public void listKeyVersions() { KeyAsyncClient keyAsyncClient = createAsyncClient(); keyAsyncClient.listPropertiesOfKeyVersions("keyName") .subscriberContext(Context.of(key1, value1, key2, value2)) .subscribe(keyProperties -> keyAsyncClient.getKey(keyProperties.getName(), keyProperties.getVersion()) .subscribe(keyResponse -> System.out.printf("Received key's version with name %s, type %s and version %s", keyResponse.getName(), keyResponse.getKeyType(), keyResponse.getProperties().getVersion()))); } /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
FluxUtils in azure-core has a convenience method for this `monoError(logger, ex)`
public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return Mono.error(logger.logExceptionAsError(new NullPointerException("'events' cannot be null."))); } else if (options == null) { return Mono.error(logger.logExceptionAsError(new NullPointerException("'options' cannot be null."))); } return send(Flux.fromIterable(events), options); }
return Mono.error(logger.logExceptionAsError(new NullPointerException("'events' cannot be null.")));
public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final BatchOptions DEFAULT_BATCH_OPTIONS = new BatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String eventHubName; private final EventHubLinkProvider linkProvider; private final RetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link BatchOptions * balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String eventHubName, EventHubLinkProvider linkProvider, RetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer) { this.eventHubName = eventHubName; this.linkProvider = linkProvider; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getProperties() { return linkProvider.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<String> getPartitionIds() { return getProperties().flatMapMany(properties -> Flux.fromArray(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return linkProvider.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch(BatchOptions options) { if (options == null) { return Mono.error(logger.logExceptionAsError(new NullPointerException("'options' cannot be null."))); } final BatchOptions clone = options.clone(); if (!ImplUtils.isNullOrEmpty(clone.getPartitionKey()) && !ImplUtils.isNullOrEmpty(clone.getPartitionId())) { return Mono.error(logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.getPartitionKey() and BatchOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", clone.getPartitionKey(), clone.getPartitionId())))); } else if (!ImplUtils.isNullOrEmpty(clone.getPartitionKey()) && clone.getPartitionKey().length() > MAX_PARTITION_KEY_LENGTH) { return Mono.error(logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", clone.getPartitionKey(), MAX_PARTITION_KEY_LENGTH)))); } return getSendLink(clone.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (clone.getMaximumSizeInBytes() > maximumLinkSize) { return Mono.error(logger.logExceptionAsError( new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.getMaximumSizeInBytes(), maximumLinkSize)))); } final int batchSize = clone.getMaximumSizeInBytes() > 0 ? clone.getMaximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.getPartitionId(), clone.getPartitionKey(), link::getErrorContext)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. 
* </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event) { if (event == null) { return Mono.error(logger.logExceptionAsError(new NullPointerException("'event' cannot be null."))); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return Mono.error(logger.logExceptionAsError(new NullPointerException("'event' cannot be null."))); } else if (options == null) { return Mono.error(logger.logExceptionAsError(new NullPointerException("'options' cannot be null."))); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return Mono.error(logger.logExceptionAsError(new NullPointerException("'events' cannot be null."))); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. 
If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events) { if (events == null) { return Mono.error(logger.logExceptionAsError(new NullPointerException("'events' cannot be null."))); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return Mono.error(logger.logExceptionAsError(new NullPointerException("'events' cannot be null."))); } else if (options == null) { return Mono.error(logger.logExceptionAsError(new NullPointerException("'options' cannot be null."))); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. 
* * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return Mono.error(logger.logExceptionAsError(new NullPointerException("'batch' cannot be null."))); } else if (batch.getEvents().isEmpty()) { logger.warning("Cannot send an EventBatch that is empty."); return Mono.empty(); } if (!ImplUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getSize(), batch.getPartitionId()); } else if (!ImplUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getSize(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getSize()); } final String partitionKey = batch.getPartitionKey(); final List<Message> messages = batch.getEvents().stream().map(event -> { final Message message = messageSerializer.serialize(event); if (!ImplUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; }).collect(Collectors.toList()); return getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)); } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final SendOptions clone = options.clone(); final boolean isTracingEnabled = tracerProvider.isEnabled(); if (!ImplUtils.isNullOrEmpty(clone.getPartitionKey()) && !ImplUtils.isNullOrEmpty(clone.getPartitionId())) { return Mono.error(logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.getPartitionKey() and BatchOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", clone.getPartitionKey(), clone.getPartitionId())))); } return getSendLink(options.getPartitionId()) .flatMap(link -> { final AtomicReference<Context> sendSpanContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); final AtomicBoolean isFirst = new AtomicBoolean(true); return events.map(eventData -> { if (!isTracingEnabled) { return eventData; } final Context parentContext = eventData.getContext(); if (isFirst.getAndSet(false)) { Context entityContext = parentContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); sendSpanContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return setSpanContext(eventData, parentContext); }).collect(new EventDataCollector(batchOptions, 1, link::getErrorContext)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(sendSpanContext.get(), signal); } }); }); } private EventData setSpanContext(EventData event, Context parentContext) { Optional<Object> eventContextData = 
event.getContext().getData(SPAN_CONTEXT_KEY); if (eventContextData.isPresent()) { Object spanContextObject = eventContextData.get(); if (spanContextObject instanceof Context) { tracerProvider.addSpanLinks((Context) eventContextData.get()); } else { logger.warning("Event Data context type is not of type Context, but type: {}. Not adding span links.", spanContextObject.getClass()); } } else { Context eventSpanContext = tracerProvider.startSpan(parentContext, ProcessKind.MESSAGE); if (eventSpanContext != null) { Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { event.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); event.addContext(SPAN_CONTEXT_KEY, eventSpanContext); } } } return event; } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private String getEntityPath(String partitionId) { return ImplUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return ImplUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return linkProvider.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient} by closing the underlying connection to the service. 
*/ @Override public void close() { if (!isDisposed.getAndSet(true)) { openLinks.forEach((key, value) -> { try { value.close(); } catch (IOException e) { logger.warning("Error closing link for partition: {}", key, e); } }); openLinks.clear(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(BatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final BatchOptions DEFAULT_BATCH_OPTIONS = new BatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubLinkProvider linkProvider; private final RetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link BatchOptions * balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubLinkProvider linkProvider, RetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.linkProvider = linkProvider; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getProperties() { return linkProvider.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<String> getPartitionIds() { return getProperties().flatMapMany(properties -> Flux.fromArray(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return linkProvider.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch(BatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final BatchOptions clone = options.clone(); if (!ImplUtils.isNullOrEmpty(clone.getPartitionKey()) && !ImplUtils.isNullOrEmpty(clone.getPartitionId())) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.getPartitionKey() and BatchOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", clone.getPartitionKey(), clone.getPartitionId()))); } else if (!ImplUtils.isNullOrEmpty(clone.getPartitionKey()) && clone.getPartitionKey().length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", clone.getPartitionKey(), MAX_PARTITION_KEY_LENGTH))); } return getSendLink(clone.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (clone.getMaximumSizeInBytes() > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.getMaximumSizeInBytes(), maximumLinkSize))); } final int batchSize = clone.getMaximumSizeInBytes() > 0 ? 
clone.getMaximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.getPartitionId(), clone.getPartitionKey(), link::getErrorContext)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ public Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning("Cannot send an EventBatch that is empty."); return Mono.empty(); } if (!ImplUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getSize(), batch.getPartitionId()); } else if (!ImplUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getSize(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getSize()); } final String partitionKey = batch.getPartitionKey(); final List<Message> messages = batch.getEvents().stream().map(event -> { final Message message = messageSerializer.serialize(event); if (!ImplUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; }).collect(Collectors.toList()); return getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)); } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final SendOptions clone = options.clone(); final boolean isTracingEnabled = tracerProvider.isEnabled(); if (!ImplUtils.isNullOrEmpty(clone.getPartitionKey()) && !ImplUtils.isNullOrEmpty(clone.getPartitionId())) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.getPartitionKey() and BatchOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", clone.getPartitionKey(), clone.getPartitionId()))); } return getSendLink(options.getPartitionId()) .flatMap(link -> { final AtomicReference<Context> sendSpanContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); final AtomicBoolean isFirst = new AtomicBoolean(true); return events.map(eventData -> { if (!isTracingEnabled) { return eventData; } final Context parentContext = eventData.getContext(); if (isFirst.getAndSet(false)) { Context entityContext = parentContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); sendSpanContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return setSpanContext(eventData, parentContext); }).collect(new EventDataCollector(batchOptions, 1, link::getErrorContext)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(sendSpanContext.get(), signal); } }); }); } private EventData setSpanContext(EventData event, Context parentContext) { Optional<Object> eventContextData = event.getContext().getData(SPAN_CONTEXT_KEY); if (eventContextData.isPresent()) { Object spanContextObject = eventContextData.get(); if (spanContextObject instanceof Context) { tracerProvider.addSpanLinks((Context) eventContextData.get()); } else { logger.warning("Event Data context type is not of type Context, but type: {}. 
Not adding span links.", spanContextObject.getClass()); } } else { Context eventSpanContext = tracerProvider.startSpan(parentContext, ProcessKind.MESSAGE); if (eventSpanContext != null) { Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { event.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); event.addContext(SPAN_CONTEXT_KEY, eventSpanContext); } } } return event; } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private String getEntityPath(String partitionId) { return ImplUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return ImplUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return linkProvider.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient} by closing the underlying connection to the service. */ @Override public void close() { if (!isDisposed.getAndSet(true)) { openLinks.forEach((key, value) -> { try { value.close(); } catch (IOException e) { logger.warning("Error closing link for partition: {}", key, e); } }); openLinks.clear(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. 
If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(BatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Instead of building the file path as a string, checking for existence, and then either failing or constructing the file, why not use: ```java File playbackFile = new File(folderFile, testName + ".json"); if (!playbackFile.exists()) { // throw exception } return playbackFile; ```
private File getRecordFile(String testName) { URL folderUrl = InterceptorManager.class.getClassLoader().getResource("."); File folderFile = new File(folderUrl.getPath() + RECORD_FOLDER); if (!folderFile.exists()) { if (folderFile.mkdir()) { logger.verbose("Created directory: {}", folderFile.getPath()); } } String filePath = folderFile.getPath() + "/" + testName + ".json"; if (Files.notExists(Paths.get(filePath))) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Missing playback file. File path: %s. ", filePath))); } logger.info("==> Playback file path: " + filePath); return new File(filePath); }
String filePath = folderFile.getPath() + "/" + testName + ".json";
private File getRecordFile(String testName) { URL folderUrl = InterceptorManager.class.getClassLoader().getResource("."); File folderFile = new File(folderUrl.getPath() + RECORD_FOLDER); if (!folderFile.exists()) { if (folderFile.mkdir()) { logger.verbose("Created directory: {}", folderFile.getPath()); } } File playbackFile = new File(folderFile, testName + ".json"); if (!playbackFile.exists()) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Missing playback file. File path: %s. ", playbackFile))); } logger.info("==> Playback file path: " + playbackFile); return playbackFile; }
class InterceptorManager implements AutoCloseable { private static final String RECORD_FOLDER = "session-records/"; private final ClientLogger logger = new ClientLogger(InterceptorManager.class); private final Map<String, String> textReplacementRules; private final String testName; private final TestMode testMode; private final RecordedData recordedData; /** * Creates a new InterceptorManager that either replays test-session records or saves them. * * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param testMode The {@link TestMode} for this interceptor. * @throws IOException If {@code testMode} is {@link TestMode * not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. */ public InterceptorManager(String testName, TestMode testMode) throws IOException { Objects.requireNonNull(testName, "'testName' cannot be null."); this.testName = testName; this.testMode = testMode; this.textReplacementRules = new HashMap<>(); this.recordedData = testMode == TestMode.PLAYBACK ? readDataFromFile() : new RecordedData(); } /** * Creates a new InterceptorManager that replays test session records. It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * back network calls. 
* @throws IOException An existing test session record could not be located or the data could not be deserialized * into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. */ public InterceptorManager(String testName, Map<String, String> textReplacementRules) throws IOException { Objects.requireNonNull(testName, "'testName' cannot be null."); Objects.requireNonNull(textReplacementRules, "'textReplacementRules' cannot be null."); this.testName = testName; this.testMode = TestMode.PLAYBACK; this.recordedData = readDataFromFile(); this.textReplacementRules = textReplacementRules; } /** * Gets whether this InterceptorManager is in playback mode. * * @return true if the InterceptorManager is in playback mode and false otherwise. */ public boolean isPlaybackMode() { return testMode == TestMode.PLAYBACK; } /** * Gets the recorded data InterceptorManager is keeping track of. * * @return The recorded data managed by InterceptorManager. */ public RecordedData getRecordedData() { return recordedData; } /** * Gets a new HTTP pipeline policy that records network calls and its data is managed by {@link InterceptorManager}. * * @return HttpPipelinePolicy to record network calls. */ public HttpPipelinePolicy getRecordPolicy() { return new RecordNetworkCallPolicy(recordedData); } /** * Gets a new HTTP client that plays back test session records managed by {@link InterceptorManager}. * * @return An HTTP client that plays back network calls from its recorded data. */ public HttpClient getPlaybackClient() { return new PlaybackClient(recordedData, textReplacementRules); } /** * Disposes of resources used by this InterceptorManager. 
* * If {@code testMode} is {@link TestMode * "<i>session-records/{@code testName}.json</i>" */ @Override public void close() { switch (testMode) { case RECORD: try { writeDataToFile(); } catch (IOException e) { logger.error("Unable to write data to playback file.", e); } break; case PLAYBACK: break; default: logger.error("==> Unknown AZURE_TEST_MODE: {}", testMode); break; } } private RecordedData readDataFromFile() throws IOException { File recordFile = getRecordFile(testName); ObjectMapper mapper = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); return mapper.readValue(recordFile, RecordedData.class); } private void writeDataToFile() throws IOException { ObjectMapper mapper = new ObjectMapper(); mapper.enable(SerializationFeature.INDENT_OUTPUT); File recordFile = getRecordFile(testName); if (recordFile.createNewFile()) { logger.verbose("Created record file: {}", recordFile.getPath()); } mapper.writeValue(recordFile, recordedData); } /** * Add text replacement rule (regex as key, the replacement text as value) into {@link InterceptorManager * * @param regex the pattern to locate the position of replacement * @param replacement the replacement text */ public void addTextReplacementRule(String regex, String replacement) { textReplacementRules.put(regex, replacement); } }
class InterceptorManager implements AutoCloseable { private static final String RECORD_FOLDER = "session-records/"; private final ClientLogger logger = new ClientLogger(InterceptorManager.class); private final Map<String, String> textReplacementRules; private final String testName; private final TestMode testMode; private final RecordedData recordedData; /** * Creates a new InterceptorManager that either replays test-session records or saves them. * * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param testMode The {@link TestMode} for this interceptor. * @throws IOException If {@code testMode} is {@link TestMode * not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. */ public InterceptorManager(String testName, TestMode testMode) throws IOException { Objects.requireNonNull(testName, "'testName' cannot be null."); this.testName = testName; this.testMode = testMode; this.textReplacementRules = new HashMap<>(); this.recordedData = testMode == TestMode.PLAYBACK ? readDataFromFile() : new RecordedData(); } /** * Creates a new InterceptorManager that replays test session records. It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * back network calls. 
* @throws IOException An existing test session record could not be located or the data could not be deserialized * into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. */ public InterceptorManager(String testName, Map<String, String> textReplacementRules) throws IOException { Objects.requireNonNull(testName, "'testName' cannot be null."); Objects.requireNonNull(textReplacementRules, "'textReplacementRules' cannot be null."); this.testName = testName; this.testMode = TestMode.PLAYBACK; this.recordedData = readDataFromFile(); this.textReplacementRules = textReplacementRules; } /** * Gets whether this InterceptorManager is in playback mode. * * @return true if the InterceptorManager is in playback mode and false otherwise. */ public boolean isPlaybackMode() { return testMode == TestMode.PLAYBACK; } /** * Gets the recorded data InterceptorManager is keeping track of. * * @return The recorded data managed by InterceptorManager. */ public RecordedData getRecordedData() { return recordedData; } /** * Gets a new HTTP pipeline policy that records network calls and its data is managed by {@link InterceptorManager}. * * @return HttpPipelinePolicy to record network calls. */ public HttpPipelinePolicy getRecordPolicy() { return new RecordNetworkCallPolicy(recordedData); } /** * Gets a new HTTP client that plays back test session records managed by {@link InterceptorManager}. * * @return An HTTP client that plays back network calls from its recorded data. */ public HttpClient getPlaybackClient() { return new PlaybackClient(recordedData, textReplacementRules); } /** * Disposes of resources used by this InterceptorManager. 
* * If {@code testMode} is {@link TestMode * "<i>session-records/{@code testName}.json</i>" */ @Override public void close() { switch (testMode) { case RECORD: try { writeDataToFile(); } catch (IOException e) { logger.error("Unable to write data to playback file.", e); } break; case PLAYBACK: break; default: logger.error("==> Unknown AZURE_TEST_MODE: {}", testMode); break; } } private RecordedData readDataFromFile() throws IOException { File recordFile = getRecordFile(testName); ObjectMapper mapper = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); return mapper.readValue(recordFile, RecordedData.class); } private void writeDataToFile() throws IOException { ObjectMapper mapper = new ObjectMapper(); mapper.enable(SerializationFeature.INDENT_OUTPUT); File recordFile = getRecordFile(testName); if (recordFile.createNewFile()) { logger.verbose("Created record file: {}", recordFile.getPath()); } mapper.writeValue(recordFile, recordedData); } /** * Add text replacement rule (regex as key, the replacement text as value) into {@link InterceptorManager * * @param regex the pattern to locate the position of replacement * @param replacement the replacement text */ public void addTextReplacementRule(String regex, String replacement) { textReplacementRules.put(regex, replacement); } }
Can you add in creating a directory client and putting a file under it? That way we show all the basic client types.
public static void main(String[] args) throws IOException { /* * From the Azure portal, get your Storage account's name and account key. */ String accountName = SampleHelper.getAccountName(); String accountKey = SampleHelper.getAccountKey(); /* * Use your Storage account's name and key to create a credential object; this is used to access your account. */ StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey); /* * From the Azure portal, get your Storage account dfs service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /* * Create a DataLakeServiceClient object that wraps the service endpoint, credential and a request pipeline. */ DataLakeServiceClient storageClient = new DataLakeServiceClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /* * This example shows several common operations just to get you started. */ /* * Create a client that references a to-be-created file system in your Azure Storage account. This returns a * FileSystem object that wraps the file system's endpoint, credential and a request pipeline (inherited from storageClient). * Note that file system names require lowercase. */ FileSystemClient fileSystemClient = storageClient.getFileSystemClient("myjavafilesystembasic" + System.currentTimeMillis()); /* * Create a file system in Storage datalake account. */ fileSystemClient.create(); /* * Create a client that references a to-be-created file in your Azure Storage account's file system. * This returns a DataLakeFileClient object that wraps the file's endpoint, credential and a request pipeline * (inherited from fileSystemClient). Note that file names can be mixed case. */ DataLakeFileClient fileClient = fileSystemClient.getFileClient("HelloWorld.txt"); String data = "Hello world!"; InputStream dataStream = new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8)); /* * Create the file with string (plain text) content. 
*/ fileClient.create(); fileClient.append(dataStream, 0, data.length()); fileClient.flush(data.length()); dataStream.close(); /* * Download the file's content to output stream. */ int dataSize = (int) fileClient.getProperties().getFileSize(); ByteArrayOutputStream outputStream = new ByteArrayOutputStream(dataSize); fileClient.read(outputStream); outputStream.close(); /* * Verify that the file data round-tripped correctly. */ if (!data.equals(new String(outputStream.toByteArray(), StandardCharsets.UTF_8))) { throw new RuntimeException("The downloaded data does not match the uploaded data."); } /* * Create more files (maybe even a few directories) before listing. */ for (int i = 0; i < 3; i++) { String sampleData = "Samples"; InputStream dataInFiles = new ByteArrayInputStream(sampleData.getBytes(Charset.defaultCharset())); DataLakeFileClient fClient = fileSystemClient.getFileClient("myfilesforlisting" + System.currentTimeMillis()); fClient.create(); fClient.append(dataInFiles, 0, sampleData.length()); fClient.flush(sampleData.length()); dataInFiles.close(); fileSystemClient.getDirectoryClient("mydirsforlisting" + System.currentTimeMillis()).create(); } /* * List the path(s) in our file system. */ fileSystemClient.listPaths() .forEach(pathItem -> System.out.println("Path name: " + pathItem.getName())); /* * Delete the file we created earlier. */ fileClient.delete(); /* * Delete the file system we created earlier. */ fileSystemClient.delete(); }
InputStream dataStream = new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8));
public static void main(String[] args) throws IOException { /* * From the Azure portal, get your Storage account's name and account key. */ String accountName = SampleHelper.getAccountName(); String accountKey = SampleHelper.getAccountKey(); /* * Use your Storage account's name and key to create a credential object; this is used to access your account. */ StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey); /* * From the Azure portal, get your Storage account dfs service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /* * Create a DataLakeServiceClient object that wraps the service endpoint, credential and a request pipeline. */ DataLakeServiceClient storageClient = new DataLakeServiceClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /* * This example shows several common operations just to get you started. */ /* * Create a client that references a to-be-created file system in your Azure Storage account. This returns a * FileSystem object that wraps the file system's endpoint, credential and a request pipeline (inherited from storageClient). * Note that file system names require lowercase. */ DataLakeFileSystemClient dataLakeFileSystemClient = storageClient.getFileSystemClient("myjavafilesystembasic" + System.currentTimeMillis()); /* * Create a file system in Storage datalake account. */ dataLakeFileSystemClient.create(); /* * Create a client that references a to-be-created file in your Azure Storage account's file system. * This returns a DataLakeFileClient object that wraps the file's endpoint, credential and a request pipeline * (inherited from dataLakeFileSystemClient). Note that file names can be mixed case. 
*/ DataLakeFileClient fileClient = dataLakeFileSystemClient.getFileClient("HelloWorld.txt"); String data = "Hello world!"; InputStream dataStream = new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8)); /* * Create the file with string (plain text) content. */ fileClient.create(); fileClient.append(dataStream, 0, data.length()); fileClient.flush(data.length()); dataStream.close(); /* * Download the file's content to output stream. */ int dataSize = (int) fileClient.getProperties().getFileSize(); ByteArrayOutputStream outputStream = new ByteArrayOutputStream(dataSize); fileClient.read(outputStream); outputStream.close(); /* * Verify that the file data round-tripped correctly. */ if (!data.equals(new String(outputStream.toByteArray(), StandardCharsets.UTF_8))) { throw new RuntimeException("The downloaded data does not match the uploaded data."); } /* * Create more files (maybe even a few directories) before listing. */ for (int i = 0; i < 3; i++) { String sampleData = "Samples"; InputStream dataInFiles = new ByteArrayInputStream(sampleData.getBytes(Charset.defaultCharset())); DataLakeFileClient fClient = dataLakeFileSystemClient.getFileClient("myfilesforlisting" + System.currentTimeMillis()); fClient.create(); fClient.append(dataInFiles, 0, sampleData.length()); fClient.flush(sampleData.length()); dataInFiles.close(); dataLakeFileSystemClient.getDirectoryClient("mydirsforlisting" + System.currentTimeMillis()).create(); } /* * List the path(s) in our file system. */ dataLakeFileSystemClient.listPaths() .forEach(pathItem -> System.out.println("Path name: " + pathItem.getName())); /* * Delete the file we created earlier. */ fileClient.delete(); /* * Delete the file system we created earlier. */ dataLakeFileSystemClient.delete(); }
class BasicExample { /** * Entry point into the basic examples for Storage datalake. * * @param args Unused. Arguments to the program. * @throws IOException If an I/O error occurs * @throws RuntimeException If the downloaded data doesn't match the uploaded data */ }
class BasicExample { /** * Entry point into the basic examples for Storage datalake. * * @param args Unused. Arguments to the program. * @throws IOException If an I/O error occurs * @throws RuntimeException If the downloaded data doesn't match the uploaded data */ }
https://github.com/Azure/azure-sdk-for-java/issues/6106
public static void main(String[] args) throws IOException { /* * From the Azure portal, get your Storage account's name and account key. */ String accountName = SampleHelper.getAccountName(); String accountKey = SampleHelper.getAccountKey(); /* * Use your Storage account's name and key to create a credential object; this is used to access your account. */ StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey); /* * From the Azure portal, get your Storage account dfs service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /* * Create a DataLakeServiceClient object that wraps the service endpoint, credential and a request pipeline. */ DataLakeServiceClient storageClient = new DataLakeServiceClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /* * This example shows several common operations just to get you started. */ /* * Create a client that references a to-be-created file system in your Azure Storage account. This returns a * FileSystem object that wraps the file system's endpoint, credential and a request pipeline (inherited from storageClient). * Note that file system names require lowercase. */ FileSystemClient fileSystemClient = storageClient.getFileSystemClient("myjavafilesystembasic" + System.currentTimeMillis()); /* * Create a file system in Storage datalake account. */ fileSystemClient.create(); /* * Create a client that references a to-be-created file in your Azure Storage account's file system. * This returns a DataLakeFileClient object that wraps the file's endpoint, credential and a request pipeline * (inherited from fileSystemClient). Note that file names can be mixed case. */ DataLakeFileClient fileClient = fileSystemClient.getFileClient("HelloWorld.txt"); String data = "Hello world!"; InputStream dataStream = new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8)); /* * Create the file with string (plain text) content. 
*/ fileClient.create(); fileClient.append(dataStream, 0, data.length()); fileClient.flush(data.length()); dataStream.close(); /* * Download the file's content to output stream. */ int dataSize = (int) fileClient.getProperties().getFileSize(); ByteArrayOutputStream outputStream = new ByteArrayOutputStream(dataSize); fileClient.read(outputStream); outputStream.close(); /* * Verify that the file data round-tripped correctly. */ if (!data.equals(new String(outputStream.toByteArray(), StandardCharsets.UTF_8))) { throw new RuntimeException("The downloaded data does not match the uploaded data."); } /* * Create more files (maybe even a few directories) before listing. */ for (int i = 0; i < 3; i++) { String sampleData = "Samples"; InputStream dataInFiles = new ByteArrayInputStream(sampleData.getBytes(Charset.defaultCharset())); DataLakeFileClient fClient = fileSystemClient.getFileClient("myfilesforlisting" + System.currentTimeMillis()); fClient.create(); fClient.append(dataInFiles, 0, sampleData.length()); fClient.flush(sampleData.length()); dataInFiles.close(); fileSystemClient.getDirectoryClient("mydirsforlisting" + System.currentTimeMillis()).create(); } /* * List the path(s) in our file system. */ fileSystemClient.listPaths() .forEach(pathItem -> System.out.println("Path name: " + pathItem.getName())); /* * Delete the file we created earlier. */ fileClient.delete(); /* * Delete the file system we created earlier. */ fileSystemClient.delete(); }
InputStream dataStream = new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8));
public static void main(String[] args) throws IOException { /* * From the Azure portal, get your Storage account's name and account key. */ String accountName = SampleHelper.getAccountName(); String accountKey = SampleHelper.getAccountKey(); /* * Use your Storage account's name and key to create a credential object; this is used to access your account. */ StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey); /* * From the Azure portal, get your Storage account dfs service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /* * Create a DataLakeServiceClient object that wraps the service endpoint, credential and a request pipeline. */ DataLakeServiceClient storageClient = new DataLakeServiceClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /* * This example shows several common operations just to get you started. */ /* * Create a client that references a to-be-created file system in your Azure Storage account. This returns a * FileSystem object that wraps the file system's endpoint, credential and a request pipeline (inherited from storageClient). * Note that file system names require lowercase. */ DataLakeFileSystemClient dataLakeFileSystemClient = storageClient.getFileSystemClient("myjavafilesystembasic" + System.currentTimeMillis()); /* * Create a file system in Storage datalake account. */ dataLakeFileSystemClient.create(); /* * Create a client that references a to-be-created file in your Azure Storage account's file system. * This returns a DataLakeFileClient object that wraps the file's endpoint, credential and a request pipeline * (inherited from dataLakeFileSystemClient). Note that file names can be mixed case. 
*/ DataLakeFileClient fileClient = dataLakeFileSystemClient.getFileClient("HelloWorld.txt"); String data = "Hello world!"; InputStream dataStream = new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8)); /* * Create the file with string (plain text) content. */ fileClient.create(); fileClient.append(dataStream, 0, data.length()); fileClient.flush(data.length()); dataStream.close(); /* * Download the file's content to output stream. */ int dataSize = (int) fileClient.getProperties().getFileSize(); ByteArrayOutputStream outputStream = new ByteArrayOutputStream(dataSize); fileClient.read(outputStream); outputStream.close(); /* * Verify that the file data round-tripped correctly. */ if (!data.equals(new String(outputStream.toByteArray(), StandardCharsets.UTF_8))) { throw new RuntimeException("The downloaded data does not match the uploaded data."); } /* * Create more files (maybe even a few directories) before listing. */ for (int i = 0; i < 3; i++) { String sampleData = "Samples"; InputStream dataInFiles = new ByteArrayInputStream(sampleData.getBytes(Charset.defaultCharset())); DataLakeFileClient fClient = dataLakeFileSystemClient.getFileClient("myfilesforlisting" + System.currentTimeMillis()); fClient.create(); fClient.append(dataInFiles, 0, sampleData.length()); fClient.flush(sampleData.length()); dataInFiles.close(); dataLakeFileSystemClient.getDirectoryClient("mydirsforlisting" + System.currentTimeMillis()).create(); } /* * List the path(s) in our file system. */ dataLakeFileSystemClient.listPaths() .forEach(pathItem -> System.out.println("Path name: " + pathItem.getName())); /* * Delete the file we created earlier. */ fileClient.delete(); /* * Delete the file system we created earlier. */ dataLakeFileSystemClient.delete(); }
class BasicExample { /** * Entry point into the basic examples for Storage datalake. * * @param args Unused. Arguments to the program. * @throws IOException If an I/O error occurs * @throws RuntimeException If the downloaded data doesn't match the uploaded data */ }
class BasicExample { /** * Entry point into the basic examples for Storage datalake. * * @param args Unused. Arguments to the program. * @throws IOException If an I/O error occurs * @throws RuntimeException If the downloaded data doesn't match the uploaded data */ }
Should this be logged at an info level as this isn't performing the operation as expected?
public void setAttribute(String key, String value, Context context) { if (ImplUtils.isNullOrEmpty(value)) { logger.info("Failed to set span attribute since value is null or empty."); return; } final Span span = getSpan(context); if (span != null) { span.setAttribute(key, AttributeValue.stringAttributeValue(value)); } else { logger.warning("Failed to find span to add attribute."); } }
logger.info("Failed to set span attribute since value is null or empty.");
public void setAttribute(String key, String value, Context context) { if (CoreUtils.isNullOrEmpty(value)) { logger.warning("Failed to set span attribute since value is null or empty."); return; } final Span span = getSpan(context); if (span != null) { span.setAttribute(key, AttributeValue.stringAttributeValue(value)); } else { logger.warning("Failed to find span to add attribute."); } }
class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer { private static final Tracer TRACER = OpenTelemetry.getTracerFactory().get("Azure-OpenTelemetry"); static final String COMPONENT = "component"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class); /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Builder spanBuilder = getSpanBuilder(spanName, context); Span span = spanBuilder.startSpan(); return context.addData(PARENT_SPAN_KEY, span); } /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); Span span; Builder spanBuilder; switch (processKind) { case SEND: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.PRODUCER).startSpan(); if (span.isRecording()) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span); case MESSAGE: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.startSpan(); context = setContextData(span); return context.addData(PARENT_SPAN_KEY, span); case PROCESS: return startScopedSpan(spanName, context); default: return Context.NONE; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); final Span span = getSpan(context); if (span == null) { return; } if (span.isRecording()) { span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); 
} span.end(); } /** * {@inheritDoc} */ @Override /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { final Span span = getSpan(context); if (span == null) { logger.warning("Failed to find span to end it."); return; } if (span.isRecording()) { span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void addLink(Context context) { final Builder spanBuilder = getSpanBuilder(PARENT_SPAN_KEY, context); final SpanContext spanContext = getSpanContext(context); if (spanContext == null) { logger.warning("Failed to find span context to link it."); return; } spanBuilder.addLink(spanContext); spanBuilder.startSpan(); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } /** * Starts a new child {@link Span} with parent being the remote and uses the {@link * Span} is in the current Context, to return an object that represents that scope. * <p>The scope is exited when the returned object is closed.</p> * * @param spanName The name of the returned Span. * @param context The {@link com.azure.core.util.Context} containing the {@link * SpanContext}. * @return The returned {@link Span} and the scope in a {@link com.azure.core.util.Context} * object. 
*/ private Context startScopedSpan(String spanName, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span; SpanContext spanContext = getSpanContext(context); if (spanContext != null) { span = startSpanWithRemoteParent(spanName, spanContext); } else { Builder spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan(); } return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } /** * Creates a {@link Builder} to create and start a new child {@link Span} with parent being the remote and * designated by the {@link SpanContext}. * * @param spanName The name of the returned Span. * @param spanContext The remote parent context of the returned Span. * @return A {@link Span} with parent being the remote {@link Span} * designated by the {@link SpanContext}. */ private static Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) { Builder spanBuilder = TRACER.spanBuilder(spanName).setParent(spanContext); spanBuilder.setSpanKind(Span.Kind.SERVER); return spanBuilder.startSpan(); } /** * Extracts the {@link SpanContext trace identifiers} and the {@link * SpanContext} of the current tracing span as text and returns in a {@link * com.azure.core.util.Context} object. * * @param span The current tracing span. * @return The {@link com.azure.core.util.Context} containing the {@link SpanContext} and * trace-parent of the current span. */ private static Context setContextData(Span span) { SpanContext spanContext = span.getContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. 
* @param spanName The name of the returned Span containing the component value. */ private void addSpanRequestAttributes(Span span, Context context, String spanName) { Objects.requireNonNull(span, "'span' cannot be null."); span.setAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.setAttribute( MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(getRequestKeyAttribute(context, ENTITY_PATH_KEY))); span.setAttribute( PEER_ENDPOINT, AttributeValue.stringAttributeValue(getRequestKeyAttribute(context, HOST_NAME_KEY))); } /** * Extracts the component name from the given span name. * * @param spanName The spanName containing the component name. * @return The component name contained in the context. */ private static String parseComponentValue(String spanName) { if (spanName != null && !spanName.isEmpty()) { int componentNameStartIndex = spanName.indexOf("."); int componentNameEndIndex = spanName.lastIndexOf("."); if (componentNameStartIndex != -1 && componentNameEndIndex != -1) { return spanName.substring(componentNameStartIndex + 1, componentNameEndIndex); } } return ""; } /** * Extracts request attributes from the given {@code context} and provided key. * * @param context The context containing the entity path. * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @return The value for the provided key contained in the context. */ private String getRequestKeyAttribute(Context context, String key) { final Optional<Object> optionalObject = context.getData(key); if (!optionalObject.isPresent()) { logger.warning("Failed to find {} in the context.", key); return ""; } final Object value = optionalObject.get(); if (!(value instanceof String)) { logger.warning("Could not extract {}. Data is not of type String. Actual class: {}", key, value.getClass()); return ""; } return value.toString(); } /** * Extracts a {@link Span} from the given {@code context}. 
* * @param context The context containing the span. * @return The {@link Span} contained in the context, and {@code null} if it does not. */ private Span getSpan(Context context) { final Optional<Object> spanOptional = context.getData(PARENT_SPAN_KEY); if (!spanOptional.isPresent()) { logger.warning("Failed to find span in the context."); return null; } final Object value = spanOptional.get(); if (!(value instanceof Span)) { logger.warning("Could not extract span. Data in {} is not of type Span. Actual class: {}", PARENT_SPAN_KEY, value.getClass()); return null; } return (Span) value; } /** * Extracts the span name from the given {@code context}. * * @param context The context containing the span name. * @return The span name contained in the context, and {@code null} if it does not. */ private String getSpanName(Context context) { final Optional<Object> spanNameOptional = context.getData(USER_SPAN_NAME_KEY); if (!spanNameOptional.isPresent()) { logger.warning("Failed to find span name in the context."); return null; } final Object value = spanNameOptional.get(); if (!(value instanceof String)) { logger.warning("Could not extract span name. Data in {} is not of type String. Actual class: {}", USER_SPAN_NAME_KEY, value.getClass()); return null; } return value.toString(); } /** * Extracts a {@link SpanContext} from the given {@code context}. * * @param context The context containing the span context. * @return The {@link SpanContext} contained in the context, and {@code null} if it does not. */ private SpanContext getSpanContext(Context context) { final Optional<Object> spanContextOptional = context.getData(SPAN_CONTEXT_KEY); if (!spanContextOptional.isPresent()) { logger.warning("Failed to find span context in the context."); return null; } final Object value = spanContextOptional.get(); if (!(value instanceof SpanContext)) { logger.warning("Could not extract span context. Data is not of type SpanContext. 
Actual class: {}", value.getClass()); return null; } return (SpanContext) value; } /** * Returns a {@link Builder} to create and start a new child {@link Span} with parent being * the designated {@code Span}. * * @param spanName The name of the returned Span. * @param context The context containing the span and the span name. * @return A {@code Span.Builder} to create and start a new {@code Span}. */ private Builder getSpanBuilder(String spanName, Context context) { Span parentSpan = getSpan(context); String spanNameKey = getSpanName(context); if (spanNameKey == null) { spanNameKey = spanName; } if (parentSpan == null) { parentSpan = TRACER.getCurrentSpan(); } return TRACER.spanBuilder(spanNameKey).setParent(parentSpan); } }
class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer { private static final Tracer TRACER = OpenTelemetry.getTracerFactory().get("Azure-OpenTelemetry"); static final String COMPONENT = "component"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class); /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Builder spanBuilder = getSpanBuilder(spanName, context); Span span = spanBuilder.startSpan(); return context.addData(PARENT_SPAN_KEY, span); } /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); Span span; Builder spanBuilder; switch (processKind) { case SEND: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.PRODUCER).startSpan(); if (span.isRecording()) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span); case MESSAGE: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.startSpan(); context = setContextData(span); return context.addData(PARENT_SPAN_KEY, span); case PROCESS: return startScopedSpan(spanName, context); default: return Context.NONE; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); final Span span = getSpan(context); if (span == null) { return; } if (span.isRecording()) { span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); 
} span.end(); } /** * {@inheritDoc} */ @Override /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { final Span span = getSpan(context); if (span == null) { logger.warning("Failed to find span to end it."); return; } if (span.isRecording()) { span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void addLink(Context context) { final Builder spanBuilder = getSpanBuilder(PARENT_SPAN_KEY, context); final SpanContext spanContext = getSpanContext(context); if (spanContext == null) { logger.warning("Failed to find span context to link it."); return; } spanBuilder.addLink(spanContext); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } /** * Starts a new child {@link Span} with parent being the remote and uses the {@link * Span} is in the current Context, to return an object that represents that scope. * <p>The scope is exited when the returned object is closed.</p> * * @param spanName The name of the returned Span. * @param context The {@link com.azure.core.util.Context} containing the {@link * SpanContext}. * @return The returned {@link Span} and the scope in a {@link com.azure.core.util.Context} * object. 
*/ private Context startScopedSpan(String spanName, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span; SpanContext spanContext = getSpanContext(context); if (spanContext != null) { span = startSpanWithRemoteParent(spanName, spanContext); } else { Builder spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan(); } return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } /** * Creates a {@link Builder} to create and start a new child {@link Span} with parent being the remote and * designated by the {@link SpanContext}. * * @param spanName The name of the returned Span. * @param spanContext The remote parent context of the returned Span. * @return A {@link Span} with parent being the remote {@link Span} * designated by the {@link SpanContext}. */ private static Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) { Builder spanBuilder = TRACER.spanBuilder(spanName).setParent(spanContext); spanBuilder.setSpanKind(Span.Kind.SERVER); return spanBuilder.startSpan(); } /** * Extracts the {@link SpanContext trace identifiers} and the {@link * SpanContext} of the current tracing span as text and returns in a {@link * com.azure.core.util.Context} object. * * @param span The current tracing span. * @return The {@link com.azure.core.util.Context} containing the {@link SpanContext} and * trace-parent of the current span. */ private static Context setContextData(Span span) { SpanContext spanContext = span.getContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. 
* @param spanName The name of the returned Span containing the component value. */ private void addSpanRequestAttributes(Span span, Context context, String spanName) { Objects.requireNonNull(span, "'span' cannot be null."); span.setAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.setAttribute( MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(getRequestKeyAttribute(context, ENTITY_PATH_KEY))); span.setAttribute( PEER_ENDPOINT, AttributeValue.stringAttributeValue(getRequestKeyAttribute(context, HOST_NAME_KEY))); } /** * Extracts the component name from the given span name. * * @param spanName The spanName containing the component name i.e spanName = "Azure.eventhubs.send" * @return The component name contained in the context i.e "eventhubs" */ private static String parseComponentValue(String spanName) { if (spanName != null && !spanName.isEmpty()) { int componentNameStartIndex = spanName.indexOf("."); int componentNameEndIndex = spanName.lastIndexOf("."); if (componentNameStartIndex != -1 && componentNameEndIndex != -1) { return spanName.substring(componentNameStartIndex + 1, componentNameEndIndex); } } return ""; } /** * Extracts request attributes from the given {@code context} and provided key. * * @param context The context containing the specified attribute key. * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @return The value for the provided key contained in the context. */ private String getRequestKeyAttribute(Context context, String key) { final Object value = getOptionalObject(context, key); if (!(value instanceof String)) { logger.warning("Could not extract {}. Data in context for key {} is not of type String.", key); return ""; } return value.toString(); } /** * Extracts a {@link Span} from the given {@code context}. * * @param context The context containing the span. * @return The {@link Span} contained in the context, and {@code null} if it does not. 
*/ private Span getSpan(Context context) { final Object value = getOptionalObject(context, PARENT_SPAN_KEY); if (!(value instanceof Span)) { logger.warning("Could not extract span. Data in context for key {} is not of type Span.", PARENT_SPAN_KEY); return null; } return (Span) value; } /** * Extracts the span name from the given {@code context}. * * @param context The context containing the span name. * @return The span name contained in the context, and {@code null} if it does not. */ private String getSpanName(Context context) { final Object value = getOptionalObject(context, USER_SPAN_NAME_KEY); if (!(value instanceof String)) { logger.warning("Could not extract span name. Data in context for key {} is not of type String.", USER_SPAN_NAME_KEY); return null; } return value.toString(); } /** * Extracts a {@link SpanContext} from the given {@code context}. * * @param context The context containing the span context. * @return The {@link SpanContext} contained in the context, and {@code null} if it does not. */ private SpanContext getSpanContext(Context context) { final Object value = getOptionalObject(context, SPAN_CONTEXT_KEY); if (!(value instanceof SpanContext)) { logger.warning("Could not extract span context. Data is in context for key {} not of type SpanContext.", SPAN_CONTEXT_KEY); return null; } return (SpanContext) value; } /** * Returns a {@link Builder} to create and start a new child {@link Span} with parent being * the designated {@code Span}. * * @param spanName The name of the returned Span. * @param context The context containing the span and the span name. * @return A {@code Span.Builder} to create and start a new {@code Span}. 
*/ private Builder getSpanBuilder(String spanName, Context context) { Span parentSpan = getSpan(context); String spanNameKey = getSpanName(context); if (spanNameKey == null) { spanNameKey = spanName; } if (parentSpan == null) { parentSpan = TRACER.getCurrentSpan(); } return TRACER.spanBuilder(spanNameKey).setParent(parentSpan); } /** * Returns the value of the specified key from the context. * * @param context The context containing the specified key. * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @return The value for the provided key contained in the context. */ private Object getOptionalObject(Context context, String key) { final Optional<Object> optionalObject = context.getData(key); if (!optionalObject.isPresent()) { logger.warning("Failed to find {} in the context.", key); return null; } return optionalObject.get(); } }
Is there any way to combine these methods, or at least extract a shared helper method, since most of them perform a similar operation?
/**
 * Looks up the current tracing {@link Span} stored in the given {@code context}
 * under {@code PARENT_SPAN_KEY}.
 *
 * @param context The context expected to carry the span.
 * @return The span found in the context, or {@code null} when the key is absent
 *     or holds a value of an unexpected type (both cases are logged).
 */
private Span getSpan(Context context) {
    final Optional<Object> data = context.getData(PARENT_SPAN_KEY);
    if (!data.isPresent()) {
        logger.warning("Failed to find span in the context.");
        return null;
    }
    final Object candidate = data.get();
    if (candidate instanceof Span) {
        return (Span) candidate;
    }
    logger.warning("Could not extract span. Data in {} is not of type Span. Actual class: {}",
        PARENT_SPAN_KEY, candidate.getClass());
    return null;
}
if (!spanOptional.isPresent()) {
/**
 * Retrieves the tracing {@link Span} associated with {@code PARENT_SPAN_KEY}
 * in the given {@code context}, delegating the lookup to {@code getOptionalObject}.
 *
 * @param context The context expected to carry the span.
 * @return The span, or {@code null} when absent or not a {@link Span}
 *     (a warning is logged in that case).
 */
private Span getSpan(Context context) {
    final Object candidate = getOptionalObject(context, PARENT_SPAN_KEY);
    if (candidate instanceof Span) {
        return (Span) candidate;
    }
    logger.warning("Could not extract span. Data in context for key {} is not of type Span.",
        PARENT_SPAN_KEY);
    return null;
}
class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer { private static final Tracer TRACER = OpenTelemetry.getTracerFactory().get("Azure-OpenTelemetry"); static final String COMPONENT = "component"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class); /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Builder spanBuilder = getSpanBuilder(spanName, context); Span span = spanBuilder.startSpan(); return context.addData(PARENT_SPAN_KEY, span); } /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); Span span; Builder spanBuilder; switch (processKind) { case SEND: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.PRODUCER).startSpan(); if (span.isRecording()) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span); case MESSAGE: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.startSpan(); context = setContextData(span); return context.addData(PARENT_SPAN_KEY, span); case PROCESS: return startScopedSpan(spanName, context); default: return Context.NONE; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); final Span span = getSpan(context); if (span == null) { return; } if (span.isRecording()) { span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); 
} span.end(); } /** * {@inheritDoc} */ @Override public void setAttribute(String key, String value, Context context) { if (ImplUtils.isNullOrEmpty(value)) { logger.info("Failed to set span attribute since value is null or empty."); return; } final Span span = getSpan(context); if (span != null) { span.setAttribute(key, AttributeValue.stringAttributeValue(value)); } else { logger.warning("Failed to find span to add attribute."); } } /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { final Span span = getSpan(context); if (span == null) { logger.warning("Failed to find span to end it."); return; } if (span.isRecording()) { span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void addLink(Context context) { final Builder spanBuilder = getSpanBuilder(PARENT_SPAN_KEY, context); final SpanContext spanContext = getSpanContext(context); if (spanContext == null) { logger.warning("Failed to find span context to link it."); return; } spanBuilder.addLink(spanContext); spanBuilder.startSpan(); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } /** * Starts a new child {@link Span} with parent being the remote and uses the {@link * Span} is in the current Context, to return an object that represents that scope. * <p>The scope is exited when the returned object is closed.</p> * * @param spanName The name of the returned Span. * @param context The {@link com.azure.core.util.Context} containing the {@link * SpanContext}. * @return The returned {@link Span} and the scope in a {@link com.azure.core.util.Context} * object. 
*/ private Context startScopedSpan(String spanName, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span; SpanContext spanContext = getSpanContext(context); if (spanContext != null) { span = startSpanWithRemoteParent(spanName, spanContext); } else { Builder spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan(); } return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } /** * Creates a {@link Builder} to create and start a new child {@link Span} with parent being the remote and * designated by the {@link SpanContext}. * * @param spanName The name of the returned Span. * @param spanContext The remote parent context of the returned Span. * @return A {@link Span} with parent being the remote {@link Span} * designated by the {@link SpanContext}. */ private static Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) { Builder spanBuilder = TRACER.spanBuilder(spanName).setParent(spanContext); spanBuilder.setSpanKind(Span.Kind.SERVER); return spanBuilder.startSpan(); } /** * Extracts the {@link SpanContext trace identifiers} and the {@link * SpanContext} of the current tracing span as text and returns in a {@link * com.azure.core.util.Context} object. * * @param span The current tracing span. * @return The {@link com.azure.core.util.Context} containing the {@link SpanContext} and * trace-parent of the current span. */ private static Context setContextData(Span span) { SpanContext spanContext = span.getContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. 
* @param spanName The name of the returned Span containing the component value. */ private void addSpanRequestAttributes(Span span, Context context, String spanName) { Objects.requireNonNull(span, "'span' cannot be null."); span.setAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.setAttribute( MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(getRequestKeyAttribute(context, ENTITY_PATH_KEY))); span.setAttribute( PEER_ENDPOINT, AttributeValue.stringAttributeValue(getRequestKeyAttribute(context, HOST_NAME_KEY))); } /** * Extracts the component name from the given span name. * * @param spanName The spanName containing the component name. * @return The component name contained in the context. */ private static String parseComponentValue(String spanName) { if (spanName != null && !spanName.isEmpty()) { int componentNameStartIndex = spanName.indexOf("."); int componentNameEndIndex = spanName.lastIndexOf("."); if (componentNameStartIndex != -1 && componentNameEndIndex != -1) { return spanName.substring(componentNameStartIndex + 1, componentNameEndIndex); } } return ""; } /** * Extracts request attributes from the given {@code context} and provided key. * * @param context The context containing the entity path. * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @return The value for the provided key contained in the context. */ private String getRequestKeyAttribute(Context context, String key) { final Optional<Object> optionalObject = context.getData(key); if (!optionalObject.isPresent()) { logger.warning("Failed to find {} in the context.", key); return ""; } final Object value = optionalObject.get(); if (!(value instanceof String)) { logger.warning("Could not extract {}. Data is not of type String. Actual class: {}", key, value.getClass()); return ""; } return value.toString(); } /** * Extracts a {@link Span} from the given {@code context}. 
* * @param context The context containing the span. * @return The {@link Span} contained in the context, and {@code null} if it does not. */ /** * Extracts the span name from the given {@code context}. * * @param context The context containing the span name. * @return The span name contained in the context, and {@code null} if it does not. */ private String getSpanName(Context context) { final Optional<Object> spanNameOptional = context.getData(USER_SPAN_NAME_KEY); if (!spanNameOptional.isPresent()) { logger.warning("Failed to find span name in the context."); return null; } final Object value = spanNameOptional.get(); if (!(value instanceof String)) { logger.warning("Could not extract span name. Data in {} is not of type String. Actual class: {}", USER_SPAN_NAME_KEY, value.getClass()); return null; } return value.toString(); } /** * Extracts a {@link SpanContext} from the given {@code context}. * * @param context The context containing the span context. * @return The {@link SpanContext} contained in the context, and {@code null} if it does not. */ private SpanContext getSpanContext(Context context) { final Optional<Object> spanContextOptional = context.getData(SPAN_CONTEXT_KEY); if (!spanContextOptional.isPresent()) { logger.warning("Failed to find span context in the context."); return null; } final Object value = spanContextOptional.get(); if (!(value instanceof SpanContext)) { logger.warning("Could not extract span context. Data is not of type SpanContext. Actual class: {}", value.getClass()); return null; } return (SpanContext) value; } /** * Returns a {@link Builder} to create and start a new child {@link Span} with parent being * the designated {@code Span}. * * @param spanName The name of the returned Span. * @param context The context containing the span and the span name. * @return A {@code Span.Builder} to create and start a new {@code Span}. 
*/ private Builder getSpanBuilder(String spanName, Context context) { Span parentSpan = getSpan(context); String spanNameKey = getSpanName(context); if (spanNameKey == null) { spanNameKey = spanName; } if (parentSpan == null) { parentSpan = TRACER.getCurrentSpan(); } return TRACER.spanBuilder(spanNameKey).setParent(parentSpan); } }
class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer {
    private static final Tracer TRACER = OpenTelemetry.getTracerFactory().get("Azure-OpenTelemetry");

    // Standard attribute names stamped onto outgoing request spans.
    static final String COMPONENT = "component";
    static final String MESSAGE_BUS_DESTINATION = "message_bus.destination";
    static final String PEER_ENDPOINT = "peer.address";

    private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class);

    /**
     * {@inheritDoc}
     */
    @Override
    public Context start(String spanName, Context context) {
        Objects.requireNonNull(spanName, "'spanName' cannot be null.");
        Objects.requireNonNull(context, "'context' cannot be null.");

        Builder spanBuilder = getSpanBuilder(spanName, context);
        Span span = spanBuilder.startSpan();

        return context.addData(PARENT_SPAN_KEY, span);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Context start(String spanName, Context context, ProcessKind processKind) {
        Objects.requireNonNull(spanName, "'spanName' cannot be null.");
        Objects.requireNonNull(context, "'context' cannot be null.");
        Objects.requireNonNull(processKind, "'processKind' cannot be null.");

        Span span;
        Builder spanBuilder;

        switch (processKind) {
            case SEND:
                spanBuilder = getSpanBuilder(spanName, context);
                span = spanBuilder.setSpanKind(Span.Kind.PRODUCER).startSpan();
                if (span.isRecording()) {
                    // Only pay the attribute-extraction cost for sampled-in spans.
                    addSpanRequestAttributes(span, context, spanName);
                }
                return context.addData(PARENT_SPAN_KEY, span);
            case MESSAGE:
                spanBuilder = getSpanBuilder(spanName, context);
                span = spanBuilder.startSpan();
                // Propagate the diagnostic id and span context to the receiving side.
                context = setContextData(span);
                return context.addData(PARENT_SPAN_KEY, span);
            case PROCESS:
                return startScopedSpan(spanName, context);
            default:
                return Context.NONE;
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void end(int responseCode, Throwable throwable, Context context) {
        Objects.requireNonNull(context, "'context' cannot be null.");
        final Span span = getSpan(context);
        if (span == null) {
            return;
        }

        if (span.isRecording()) {
            span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable));
        }

        span.end();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void setAttribute(String key, String value, Context context) {
        if (CoreUtils.isNullOrEmpty(value)) {
            logger.warning("Failed to set span attribute since value is null or empty.");
            return;
        }

        final Span span = getSpan(context);
        if (span != null) {
            span.setAttribute(key, AttributeValue.stringAttributeValue(value));
        } else {
            logger.warning("Failed to find span to add attribute.");
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Context setSpanName(String spanName, Context context) {
        return context.addData(USER_SPAN_NAME_KEY, spanName);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void end(String statusMessage, Throwable throwable, Context context) {
        final Span span = getSpan(context);
        if (span == null) {
            logger.warning("Failed to find span to end it.");
            return;
        }

        if (span.isRecording()) {
            span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable));
        }

        span.end();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void addLink(Context context) {
        final Builder spanBuilder = getSpanBuilder(PARENT_SPAN_KEY, context);
        final SpanContext spanContext = getSpanContext(context);
        if (spanContext == null) {
            logger.warning("Failed to find span context to link it.");
            return;
        }
        spanBuilder.addLink(spanContext);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Context extractContext(String diagnosticId, Context context) {
        return AmqpPropagationFormatUtil.extractContext(diagnosticId, context);
    }

    /**
     * Starts a new child {@link Span} with the remote parent, if one is present in
     * {@code context}, and stores both the span and its scope in the returned
     * {@link com.azure.core.util.Context}.
     * <p>The scope is exited when the stored scope object is closed.</p>
     *
     * @param spanName The name of the returned Span.
     * @param context The {@link com.azure.core.util.Context} that may contain a remote {@link SpanContext}.
     * @return The started {@link Span} and its scope in a {@link com.azure.core.util.Context} object.
     */
    private Context startScopedSpan(String spanName, Context context) {
        Objects.requireNonNull(context, "'context' cannot be null.");
        Span span;
        SpanContext spanContext = getSpanContext(context);
        if (spanContext != null) {
            span = startSpanWithRemoteParent(spanName, spanContext);
        } else {
            Builder spanBuilder = getSpanBuilder(spanName, context);
            span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan();
        }
        return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span));
    }

    /**
     * Creates and starts a new child {@link Span} whose parent is the remote span
     * designated by the given {@link SpanContext}.
     *
     * @param spanName The name of the returned Span.
     * @param spanContext The remote parent context of the returned Span.
     * @return A started {@link Span} with the remote parent designated by {@code spanContext}.
     */
    private static Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) {
        Builder spanBuilder = TRACER.spanBuilder(spanName).setParent(spanContext);
        spanBuilder.setSpanKind(Span.Kind.SERVER);
        return spanBuilder.startSpan();
    }

    /**
     * Extracts the trace identifiers and the {@link SpanContext} of the given span as
     * text and returns them in a {@link com.azure.core.util.Context} object.
     *
     * @param span The current tracing span.
     * @return The {@link com.azure.core.util.Context} containing the {@link SpanContext} and
     * trace-parent of the span.
     */
    private static Context setContextData(Span span) {
        SpanContext spanContext = span.getContext();
        final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext);
        return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext);
    }

    /**
     * Extracts request attributes from the given {@code context} and adds them to the started span.
     *
     * @param span The span to which request attributes are to be added.
     * @param context The context containing the request attributes.
     * @param spanName The name of the returned Span containing the component value.
     */
    private void addSpanRequestAttributes(Span span, Context context, String spanName) {
        Objects.requireNonNull(span, "'span' cannot be null.");
        span.setAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName)));
        span.setAttribute(
            MESSAGE_BUS_DESTINATION,
            AttributeValue.stringAttributeValue(getRequestKeyAttribute(context, ENTITY_PATH_KEY)));
        span.setAttribute(
            PEER_ENDPOINT,
            AttributeValue.stringAttributeValue(getRequestKeyAttribute(context, HOST_NAME_KEY)));
    }

    /**
     * Extracts the component name from the given span name.
     *
     * @param spanName The spanName containing the component name i.e spanName = "Azure.eventhubs.send"
     * @return The component name contained in the span name i.e "eventhubs", or "" if none is found.
     */
    private static String parseComponentValue(String spanName) {
        if (spanName != null && !spanName.isEmpty()) {
            int componentNameStartIndex = spanName.indexOf(".");
            int componentNameEndIndex = spanName.lastIndexOf(".");
            if (componentNameStartIndex != -1 && componentNameEndIndex != -1) {
                return spanName.substring(componentNameStartIndex + 1, componentNameEndIndex);
            }
        }
        return "";
    }

    /**
     * Extracts a request attribute from the given {@code context} for the provided key.
     *
     * @param context The context containing the specified attribute key.
     * @param key The name of the attribute that needs to be extracted from the {@code Context}.
     * @return The value for the provided key contained in the context, or "" if absent or not a String.
     */
    private String getRequestKeyAttribute(Context context, String key) {
        final Object value = getOptionalObject(context, key);
        if (!(value instanceof String)) {
            // FIX: the original message had two "{}" placeholders but only one argument.
            logger.warning("Could not extract {}. Data in context is not of type String.", key);
            return "";
        }
        return value.toString();
    }

    /**
     * Extracts a {@link Span} from the given {@code context}.
     *
     * @param context The context containing the span.
     * @return The {@link Span} contained in the context, and {@code null} if it does not.
     */
    private Span getSpan(Context context) {
        // FIX: this method body was missing while its javadoc (and all callers) remained;
        // restored consistent with the sibling implementation in this file.
        final Object value = getOptionalObject(context, PARENT_SPAN_KEY);
        if (!(value instanceof Span)) {
            logger.warning("Could not extract span. Data in context for key {} is not of type Span.",
                PARENT_SPAN_KEY);
            return null;
        }
        return (Span) value;
    }

    /**
     * Extracts the span name from the given {@code context}.
     *
     * @param context The context containing the span name.
     * @return The span name contained in the context, and {@code null} if it does not.
     */
    private String getSpanName(Context context) {
        final Object value = getOptionalObject(context, USER_SPAN_NAME_KEY);
        if (!(value instanceof String)) {
            logger.warning("Could not extract span name. Data in context for key {} is not of type String.",
                USER_SPAN_NAME_KEY);
            return null;
        }
        return value.toString();
    }

    /**
     * Extracts a {@link SpanContext} from the given {@code context}.
     *
     * @param context The context containing the span context.
     * @return The {@link SpanContext} contained in the context, and {@code null} if it does not.
     */
    private SpanContext getSpanContext(Context context) {
        final Object value = getOptionalObject(context, SPAN_CONTEXT_KEY);
        if (!(value instanceof SpanContext)) {
            // FIX: the original message had garbled word order ("Data is in context for key {} not of type").
            logger.warning("Could not extract span context. Data in context for key {} is not of type SpanContext.",
                SPAN_CONTEXT_KEY);
            return null;
        }
        return (SpanContext) value;
    }

    /**
     * Returns a {@link Builder} to create and start a new child {@link Span} with parent being
     * the designated {@code Span}.
     *
     * @param spanName The name of the returned Span.
     * @param context The context containing the span and the span name.
     * @return A {@code Span.Builder} to create and start a new {@code Span}.
     */
    private Builder getSpanBuilder(String spanName, Context context) {
        Span parentSpan = getSpan(context);
        String spanNameKey = getSpanName(context);

        if (spanNameKey == null) {
            spanNameKey = spanName;
        }
        if (parentSpan == null) {
            // No explicit parent in the context; fall back to the tracer's current span.
            parentSpan = TRACER.getCurrentSpan();
        }
        return TRACER.spanBuilder(spanNameKey).setParent(parentSpan);
    }

    /**
     * Returns the value of the specified key from the context.
     *
     * @param context The context containing the specified key.
     * @param key The name of the attribute that needs to be extracted from the {@code Context}.
     * @return The value for the provided key contained in the context, or {@code null} if absent.
     */
    private Object getOptionalObject(Context context, String key) {
        final Optional<Object> optionalObject = context.getData(key);
        if (!optionalObject.isPresent()) {
            logger.warning("Failed to find {} in the context.", key);
            return null;
        }
        return optionalObject.get();
    }
}
Updated the failure log from `info` to `warning` so that a failed attribute set is surfaced at warning level.
/**
 * Sets the given attribute on the span stored in {@code context}.
 * Logs a warning and does nothing when the value is null/empty or when no span is found.
 *
 * @param key The attribute name to set on the span.
 * @param value The attribute value; ignored when null or empty.
 * @param context The context expected to carry the current span.
 */
public void setAttribute(String key, String value, Context context) {
    if (ImplUtils.isNullOrEmpty(value)) {
        // FIX: a failed attribute set is a diagnosable problem; log at warning (was info),
        // consistent with the "Failed to find span" path below.
        logger.warning("Failed to set span attribute since value is null or empty.");
        return;
    }

    final Span span = getSpan(context);
    if (span != null) {
        span.setAttribute(key, AttributeValue.stringAttributeValue(value));
    } else {
        logger.warning("Failed to find span to add attribute.");
    }
}
logger.info("Failed to set span attribute since value is null or empty.");
/**
 * Sets the given attribute on the span stored in {@code context}.
 * Logs a warning and does nothing when the value is null/empty or when no span is found.
 *
 * @param key The attribute name to set on the span.
 * @param value The attribute value; ignored when null or empty.
 * @param context The context expected to carry the current span.
 */
public void setAttribute(String key, String value, Context context) {
    if (CoreUtils.isNullOrEmpty(value)) {
        // Nothing to record without a usable value.
        logger.warning("Failed to set span attribute since value is null or empty.");
        return;
    }

    final Span currentSpan = getSpan(context);
    if (currentSpan == null) {
        logger.warning("Failed to find span to add attribute.");
        return;
    }
    currentSpan.setAttribute(key, AttributeValue.stringAttributeValue(value));
}
class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer { private static final Tracer TRACER = OpenTelemetry.getTracerFactory().get("Azure-OpenTelemetry"); static final String COMPONENT = "component"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class); /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Builder spanBuilder = getSpanBuilder(spanName, context); Span span = spanBuilder.startSpan(); return context.addData(PARENT_SPAN_KEY, span); } /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); Span span; Builder spanBuilder; switch (processKind) { case SEND: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.PRODUCER).startSpan(); if (span.isRecording()) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span); case MESSAGE: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.startSpan(); context = setContextData(span); return context.addData(PARENT_SPAN_KEY, span); case PROCESS: return startScopedSpan(spanName, context); default: return Context.NONE; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); final Span span = getSpan(context); if (span == null) { return; } if (span.isRecording()) { span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); 
} span.end(); } /** * {@inheritDoc} */ @Override /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { final Span span = getSpan(context); if (span == null) { logger.warning("Failed to find span to end it."); return; } if (span.isRecording()) { span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void addLink(Context context) { final Builder spanBuilder = getSpanBuilder(PARENT_SPAN_KEY, context); final SpanContext spanContext = getSpanContext(context); if (spanContext == null) { logger.warning("Failed to find span context to link it."); return; } spanBuilder.addLink(spanContext); spanBuilder.startSpan(); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } /** * Starts a new child {@link Span} with parent being the remote and uses the {@link * Span} is in the current Context, to return an object that represents that scope. * <p>The scope is exited when the returned object is closed.</p> * * @param spanName The name of the returned Span. * @param context The {@link com.azure.core.util.Context} containing the {@link * SpanContext}. * @return The returned {@link Span} and the scope in a {@link com.azure.core.util.Context} * object. 
*/ private Context startScopedSpan(String spanName, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span; SpanContext spanContext = getSpanContext(context); if (spanContext != null) { span = startSpanWithRemoteParent(spanName, spanContext); } else { Builder spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan(); } return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } /** * Creates a {@link Builder} to create and start a new child {@link Span} with parent being the remote and * designated by the {@link SpanContext}. * * @param spanName The name of the returned Span. * @param spanContext The remote parent context of the returned Span. * @return A {@link Span} with parent being the remote {@link Span} * designated by the {@link SpanContext}. */ private static Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) { Builder spanBuilder = TRACER.spanBuilder(spanName).setParent(spanContext); spanBuilder.setSpanKind(Span.Kind.SERVER); return spanBuilder.startSpan(); } /** * Extracts the {@link SpanContext trace identifiers} and the {@link * SpanContext} of the current tracing span as text and returns in a {@link * com.azure.core.util.Context} object. * * @param span The current tracing span. * @return The {@link com.azure.core.util.Context} containing the {@link SpanContext} and * trace-parent of the current span. */ private static Context setContextData(Span span) { SpanContext spanContext = span.getContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. 
* @param spanName The name of the returned Span containing the component value. */ private void addSpanRequestAttributes(Span span, Context context, String spanName) { Objects.requireNonNull(span, "'span' cannot be null."); span.setAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.setAttribute( MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(getRequestKeyAttribute(context, ENTITY_PATH_KEY))); span.setAttribute( PEER_ENDPOINT, AttributeValue.stringAttributeValue(getRequestKeyAttribute(context, HOST_NAME_KEY))); } /** * Extracts the component name from the given span name. * * @param spanName The spanName containing the component name. * @return The component name contained in the context. */ private static String parseComponentValue(String spanName) { if (spanName != null && !spanName.isEmpty()) { int componentNameStartIndex = spanName.indexOf("."); int componentNameEndIndex = spanName.lastIndexOf("."); if (componentNameStartIndex != -1 && componentNameEndIndex != -1) { return spanName.substring(componentNameStartIndex + 1, componentNameEndIndex); } } return ""; } /** * Extracts request attributes from the given {@code context} and provided key. * * @param context The context containing the entity path. * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @return The value for the provided key contained in the context. */ private String getRequestKeyAttribute(Context context, String key) { final Optional<Object> optionalObject = context.getData(key); if (!optionalObject.isPresent()) { logger.warning("Failed to find {} in the context.", key); return ""; } final Object value = optionalObject.get(); if (!(value instanceof String)) { logger.warning("Could not extract {}. Data is not of type String. Actual class: {}", key, value.getClass()); return ""; } return value.toString(); } /** * Extracts a {@link Span} from the given {@code context}. 
* * @param context The context containing the span. * @return The {@link Span} contained in the context, and {@code null} if it does not. */ private Span getSpan(Context context) { final Optional<Object> spanOptional = context.getData(PARENT_SPAN_KEY); if (!spanOptional.isPresent()) { logger.warning("Failed to find span in the context."); return null; } final Object value = spanOptional.get(); if (!(value instanceof Span)) { logger.warning("Could not extract span. Data in {} is not of type Span. Actual class: {}", PARENT_SPAN_KEY, value.getClass()); return null; } return (Span) value; } /** * Extracts the span name from the given {@code context}. * * @param context The context containing the span name. * @return The span name contained in the context, and {@code null} if it does not. */ private String getSpanName(Context context) { final Optional<Object> spanNameOptional = context.getData(USER_SPAN_NAME_KEY); if (!spanNameOptional.isPresent()) { logger.warning("Failed to find span name in the context."); return null; } final Object value = spanNameOptional.get(); if (!(value instanceof String)) { logger.warning("Could not extract span name. Data in {} is not of type String. Actual class: {}", USER_SPAN_NAME_KEY, value.getClass()); return null; } return value.toString(); } /** * Extracts a {@link SpanContext} from the given {@code context}. * * @param context The context containing the span context. * @return The {@link SpanContext} contained in the context, and {@code null} if it does not. */ private SpanContext getSpanContext(Context context) { final Optional<Object> spanContextOptional = context.getData(SPAN_CONTEXT_KEY); if (!spanContextOptional.isPresent()) { logger.warning("Failed to find span context in the context."); return null; } final Object value = spanContextOptional.get(); if (!(value instanceof SpanContext)) { logger.warning("Could not extract span context. Data is not of type SpanContext. 
Actual class: {}", value.getClass()); return null; } return (SpanContext) value; } /** * Returns a {@link Builder} to create and start a new child {@link Span} with parent being * the designated {@code Span}. * * @param spanName The name of the returned Span. * @param context The context containing the span and the span name. * @return A {@code Span.Builder} to create and start a new {@code Span}. */ private Builder getSpanBuilder(String spanName, Context context) { Span parentSpan = getSpan(context); String spanNameKey = getSpanName(context); if (spanNameKey == null) { spanNameKey = spanName; } if (parentSpan == null) { parentSpan = TRACER.getCurrentSpan(); } return TRACER.spanBuilder(spanNameKey).setParent(parentSpan); } }
class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer {
    private static final Tracer TRACER = OpenTelemetry.getTracerFactory().get("Azure-OpenTelemetry");

    // Standard attribute names stamped onto outgoing request spans.
    static final String COMPONENT = "component";
    static final String MESSAGE_BUS_DESTINATION = "message_bus.destination";
    static final String PEER_ENDPOINT = "peer.address";

    private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class);

    /**
     * {@inheritDoc}
     */
    @Override
    public Context start(String spanName, Context context) {
        Objects.requireNonNull(spanName, "'spanName' cannot be null.");
        Objects.requireNonNull(context, "'context' cannot be null.");

        Builder spanBuilder = getSpanBuilder(spanName, context);
        Span span = spanBuilder.startSpan();

        return context.addData(PARENT_SPAN_KEY, span);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Context start(String spanName, Context context, ProcessKind processKind) {
        Objects.requireNonNull(spanName, "'spanName' cannot be null.");
        Objects.requireNonNull(context, "'context' cannot be null.");
        Objects.requireNonNull(processKind, "'processKind' cannot be null.");

        Span span;
        Builder spanBuilder;

        switch (processKind) {
            case SEND:
                spanBuilder = getSpanBuilder(spanName, context);
                span = spanBuilder.setSpanKind(Span.Kind.PRODUCER).startSpan();
                if (span.isRecording()) {
                    // Only pay the attribute-extraction cost for sampled-in spans.
                    addSpanRequestAttributes(span, context, spanName);
                }
                return context.addData(PARENT_SPAN_KEY, span);
            case MESSAGE:
                spanBuilder = getSpanBuilder(spanName, context);
                span = spanBuilder.startSpan();
                // Propagate the diagnostic id and span context to the receiving side.
                context = setContextData(span);
                return context.addData(PARENT_SPAN_KEY, span);
            case PROCESS:
                return startScopedSpan(spanName, context);
            default:
                return Context.NONE;
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void end(int responseCode, Throwable throwable, Context context) {
        Objects.requireNonNull(context, "'context' cannot be null.");
        final Span span = getSpan(context);
        if (span == null) {
            return;
        }

        if (span.isRecording()) {
            span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable));
        }

        span.end();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void setAttribute(String key, String value, Context context) {
        // FIX: this method was missing, leaving a dangling duplicate @Override annotation
        // before setSpanName (a compile error); restored. The null/empty check is done with
        // the standard library to avoid introducing a new dependency.
        if (value == null || value.isEmpty()) {
            logger.warning("Failed to set span attribute since value is null or empty.");
            return;
        }

        final Span span = getSpan(context);
        if (span != null) {
            span.setAttribute(key, AttributeValue.stringAttributeValue(value));
        } else {
            logger.warning("Failed to find span to add attribute.");
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Context setSpanName(String spanName, Context context) {
        return context.addData(USER_SPAN_NAME_KEY, spanName);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void end(String statusMessage, Throwable throwable, Context context) {
        final Span span = getSpan(context);
        if (span == null) {
            logger.warning("Failed to find span to end it.");
            return;
        }

        if (span.isRecording()) {
            span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable));
        }

        span.end();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void addLink(Context context) {
        final Builder spanBuilder = getSpanBuilder(PARENT_SPAN_KEY, context);
        final SpanContext spanContext = getSpanContext(context);
        if (spanContext == null) {
            logger.warning("Failed to find span context to link it.");
            return;
        }
        spanBuilder.addLink(spanContext);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Context extractContext(String diagnosticId, Context context) {
        return AmqpPropagationFormatUtil.extractContext(diagnosticId, context);
    }

    /**
     * Starts a new child {@link Span} with the remote parent, if one is present in
     * {@code context}, and stores both the span and its scope in the returned
     * {@link com.azure.core.util.Context}.
     * <p>The scope is exited when the stored scope object is closed.</p>
     *
     * @param spanName The name of the returned Span.
     * @param context The {@link com.azure.core.util.Context} that may contain a remote {@link SpanContext}.
     * @return The returned {@link Span} and the scope in a {@link com.azure.core.util.Context} object.
     */
    private Context startScopedSpan(String spanName, Context context) {
        Objects.requireNonNull(context, "'context' cannot be null.");
        Span span;
        SpanContext spanContext = getSpanContext(context);
        if (spanContext != null) {
            span = startSpanWithRemoteParent(spanName, spanContext);
        } else {
            Builder spanBuilder = getSpanBuilder(spanName, context);
            span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan();
        }
        return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span));
    }

    /**
     * Creates and starts a new child {@link Span} whose parent is the remote span
     * designated by the given {@link SpanContext}.
     *
     * @param spanName The name of the returned Span.
     * @param spanContext The remote parent context of the returned Span.
     * @return A started {@link Span} with the remote parent designated by {@code spanContext}.
     */
    private static Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) {
        Builder spanBuilder = TRACER.spanBuilder(spanName).setParent(spanContext);
        spanBuilder.setSpanKind(Span.Kind.SERVER);
        return spanBuilder.startSpan();
    }

    /**
     * Extracts the trace identifiers and the {@link SpanContext} of the given span as
     * text and returns them in a {@link com.azure.core.util.Context} object.
     *
     * @param span The current tracing span.
     * @return The {@link com.azure.core.util.Context} containing the {@link SpanContext} and
     * trace-parent of the current span.
     */
    private static Context setContextData(Span span) {
        SpanContext spanContext = span.getContext();
        final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext);
        return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext);
    }

    /**
     * Extracts request attributes from the given {@code context} and adds them to the started span.
     *
     * @param span The span to which request attributes are to be added.
     * @param context The context containing the request attributes.
     * @param spanName The name of the returned Span containing the component value.
     */
    private void addSpanRequestAttributes(Span span, Context context, String spanName) {
        Objects.requireNonNull(span, "'span' cannot be null.");
        span.setAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName)));
        span.setAttribute(
            MESSAGE_BUS_DESTINATION,
            AttributeValue.stringAttributeValue(getRequestKeyAttribute(context, ENTITY_PATH_KEY)));
        span.setAttribute(
            PEER_ENDPOINT,
            AttributeValue.stringAttributeValue(getRequestKeyAttribute(context, HOST_NAME_KEY)));
    }

    /**
     * Extracts the component name from the given span name.
     *
     * @param spanName The spanName containing the component name i.e spanName = "Azure.eventhubs.send"
     * @return The component name contained in the span name i.e "eventhubs", or "" if none is found.
     */
    private static String parseComponentValue(String spanName) {
        if (spanName != null && !spanName.isEmpty()) {
            int componentNameStartIndex = spanName.indexOf(".");
            int componentNameEndIndex = spanName.lastIndexOf(".");
            if (componentNameStartIndex != -1 && componentNameEndIndex != -1) {
                return spanName.substring(componentNameStartIndex + 1, componentNameEndIndex);
            }
        }
        return "";
    }

    /**
     * Extracts a request attribute from the given {@code context} for the provided key.
     *
     * @param context The context containing the specified attribute key.
     * @param key The name of the attribute that needs to be extracted from the {@code Context}.
     * @return The value for the provided key contained in the context, or "" if absent or not a String.
     */
    private String getRequestKeyAttribute(Context context, String key) {
        final Object value = getOptionalObject(context, key);
        if (!(value instanceof String)) {
            // FIX: the original message had two "{}" placeholders but only one argument.
            logger.warning("Could not extract {}. Data in context is not of type String.", key);
            return "";
        }
        return value.toString();
    }

    /**
     * Extracts a {@link Span} from the given {@code context}.
     *
     * @param context The context containing the span.
     * @return The {@link Span} contained in the context, and {@code null} if it does not.
     */
    private Span getSpan(Context context) {
        final Object value = getOptionalObject(context, PARENT_SPAN_KEY);
        if (!(value instanceof Span)) {
            logger.warning("Could not extract span. Data in context for key {} is not of type Span.",
                PARENT_SPAN_KEY);
            return null;
        }
        return (Span) value;
    }

    /**
     * Extracts the span name from the given {@code context}.
     *
     * @param context The context containing the span name.
     * @return The span name contained in the context, and {@code null} if it does not.
     */
    private String getSpanName(Context context) {
        final Object value = getOptionalObject(context, USER_SPAN_NAME_KEY);
        if (!(value instanceof String)) {
            logger.warning("Could not extract span name. Data in context for key {} is not of type String.",
                USER_SPAN_NAME_KEY);
            return null;
        }
        return value.toString();
    }

    /**
     * Extracts a {@link SpanContext} from the given {@code context}.
     *
     * @param context The context containing the span context.
     * @return The {@link SpanContext} contained in the context, and {@code null} if it does not.
     */
    private SpanContext getSpanContext(Context context) {
        final Object value = getOptionalObject(context, SPAN_CONTEXT_KEY);
        if (!(value instanceof SpanContext)) {
            // FIX: the original message had garbled word order ("Data is in context for key {} not of type").
            logger.warning("Could not extract span context. Data in context for key {} is not of type SpanContext.",
                SPAN_CONTEXT_KEY);
            return null;
        }
        return (SpanContext) value;
    }

    /**
     * Returns a {@link Builder} to create and start a new child {@link Span} with parent being
     * the designated {@code Span}.
     *
     * @param spanName The name of the returned Span.
     * @param context The context containing the span and the span name.
     * @return A {@code Span.Builder} to create and start a new {@code Span}.
     */
    private Builder getSpanBuilder(String spanName, Context context) {
        Span parentSpan = getSpan(context);
        String spanNameKey = getSpanName(context);

        if (spanNameKey == null) {
            spanNameKey = spanName;
        }
        if (parentSpan == null) {
            // No explicit parent in the context; fall back to the tracer's current span.
            parentSpan = TRACER.getCurrentSpan();
        }
        return TRACER.spanBuilder(spanNameKey).setParent(parentSpan);
    }

    /**
     * Returns the value of the specified key from the context.
     *
     * @param context The context containing the specified key.
     * @param key The name of the attribute that needs to be extracted from the {@code Context}.
     * @return The value for the provided key contained in the context, or {@code null} if absent.
     */
    private Object getOptionalObject(Context context, String key) {
        final Optional<Object> optionalObject = context.getData(key);
        if (!optionalObject.isPresent()) {
            logger.warning("Failed to find {} in the context.", key);
            return null;
        }
        return optionalObject.get();
    }
}
Is there a need for this public getter? If the vault URL is not consumed outside the client, consider removing it or reducing its visibility.
/**
 * Gets the vault endpoint URL to which service requests are sent.
 *
 * @return The vault endpoint URL.
 */
public String getVaultUrl() {
    return vaultUrl;
}
}
/**
 * Gets the vault endpoint URL to which service requests are sent.
 *
 * @return The vault endpoint URL.
 */
public String getVaultUrl() {
    return vaultUrl;
}
class CertificateAsyncClient {
    // Service API version and fixed request headers sent with every call.
    static final String API_VERSION = "7.0";
    static final String ACCEPT_LANGUAGE = "en-US";
    static final int DEFAULT_MAX_PAGE_RESULTS = 25;
    static final String CONTENT_TYPE_HEADER_VALUE = "application/json";

    // Vault endpoint all requests are sent to, captured as a string at construction time.
    private final String vaultUrl;
    // Generated REST proxy that performs the actual HTTP calls.
    private final CertificateService service;
    private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class);

    /**
     * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests.
     *
     * @param vaultUrl URL for the Azure KeyVault service; must not be {@code null}.
     * @param pipeline HttpPipeline that the HTTP requests and responses flow through.
     * @param version {@link CertificateServiceVersion} of the service to be used when making requests.
     */
    CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) {
        // NOTE(review): 'version' is accepted but never used — requests always use the
        // hard-coded API_VERSION ("7.0"). Confirm whether the parameter should drive the version.
        Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED));
        this.vaultUrl = vaultUrl.toString();
        this.service = RestProxy.create(CertificateService.class, pipeline);
    }

    /**
     * Get the vault endpoint url to which service requests are sent to.
     * @return the vault endpoint url
     */
    // NOTE(review): the javadoc above is orphaned — no accessor follows it. Either add the
    // getter it documents or remove the comment.
    /**
     * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires
     * the certificates/create permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate
     * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate}
     *
     * @param name The name of the certificate to be created.
     * @param policy The policy of the certificate to be created.
     * @param enabled The enabled status for the certificate.
     * @param tags The application specific metadata to set.
     * @throws ResourceModifiedException when invalid certificate policy configuration is provided.
     * @return A {@link PollerFlux} polling on the create certificate operation status.
     */
    public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) {
        // Polls every second: activation starts the create operation, poll checks its status,
        // cancel aborts it server-side, fetch retrieves the finished certificate.
        return new PollerFlux<>(Duration.ofSeconds(1),
            activationOperation(name, policy, enabled, tags),
            createPollOperation(name),
            cancelOperation(name),
            fetchResultOperation(name));
    }

    // Cancels the in-flight create operation on the service when the poller is cancelled.
    private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) {
        return (pollingContext, firstResponse) -> withContext(context ->
            cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono);
    }

    // Kicks off the create-certificate request; runs once when polling is activated.
    private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) {
        return (pollingContext) -> withContext(context -> createCertificateWithResponse(name, policy, enabled, tags, context)
            .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue())));
    }

    // Retrieves the created certificate (latest version) once the operation has completed.
    private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) {
        return (pollingContext) -> withContext(context -> getCertificateWithResponse(name, "", context)
            .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue())));
    }

    /**
     * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires
     * the certificates/create permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate
     * operation status.
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean 
enabled, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .certificateAttributes(new CertificateRequestAttributes().enabled(enabled)) .tags(tags); return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
     * Prints out the
     * returned certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse}
     *
     * @param name The name of the certificate to retrieve, cannot be null
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException if {@code name} is empty string.
     * @return A {@link Mono} containing a {@link Response} whose value contains the requested certificate.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) {
        try {
            // Empty version string selects the latest version of the certificate.
            return withContext(context -> getCertificateWithResponse(name, "", context));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    // Fetches the certificate including its policy; empty 'version' means latest.
    Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) {
        return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
            .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name))
            .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName()))
            .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error));
    }

    // Fetches a specific certificate version without its policy; empty 'version' means latest.
    Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) {
        return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
            .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name))
            .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName()))
            .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error));
    }

    /**
     * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Gets a specific version of the certificate in the key vault. Prints out the
     * returned certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse}
     *
     * @param name The name of the certificate to retrieve, cannot be null
     * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved.
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException if {@code name} is empty string.
     * @return A {@link Mono} containing a {@link Response} whose value contains the requested certificate version.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) {
        try {
            return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Gets a specific version of the certificate in the key vault. Prints out the
     * returned certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion}
     *
     * @param name The name of the certificate to retrieve, cannot be null
     * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved.
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException if {@code name} is empty string.
     * @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) {
        try {
            return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing
     * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the
     * returned certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties}
     *
     * @param certificateProperties The {@link CertificateProperties} object with updated properties.
     * @throws NullPointerException if {@code certificate} is {@code null}.
     * @throws ResourceNotFoundException when a certificate with the given name doesn't exist in the key vault.
     * @throws HttpRequestException if the certificate name is an empty string.
     * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) {
        try {
            return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Updates the specified attributes associated with the specified certificate.
The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) { Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null."); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateProperties.getTags()) .certificateAttributes(new CertificateRequestAttributes(certificateProperties)); return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", 
certificateProperties.getName())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error)); } /** * Gets information about the certificate which represents the {@link CertificateProperties} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateProperties} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateProperties}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param certificateProperties The {@link CertificateProperties} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties
class CertificateAsyncClient {
    // Service API version and fixed request headers sent with every call.
    static final String API_VERSION = "7.0";
    static final String ACCEPT_LANGUAGE = "en-US";
    static final int DEFAULT_MAX_PAGE_RESULTS = 25;
    static final String CONTENT_TYPE_HEADER_VALUE = "application/json";

    // Vault endpoint all requests are sent to, captured as a string at construction time.
    private final String vaultUrl;
    // Generated REST proxy that performs the actual HTTP calls.
    private final CertificateService service;
    private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class);

    /**
     * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests.
     *
     * @param vaultUrl URL for the Azure KeyVault service; must not be {@code null}.
     * @param pipeline HttpPipeline that the HTTP requests and responses flow through.
     * @param version {@link CertificateServiceVersion} of the service to be used when making requests.
     */
    CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) {
        // NOTE(review): 'version' is accepted but never used — requests always use the
        // hard-coded API_VERSION ("7.0"). Confirm whether the parameter should drive the version.
        Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED));
        this.vaultUrl = vaultUrl.toString();
        this.service = RestProxy.create(CertificateService.class, pipeline);
    }

    /**
     * Get the vault endpoint url to which service requests are sent to.
     * @return the vault endpoint url
     */
    // NOTE(review): the javadoc above is orphaned — no accessor follows it. Either add the
    // getter it documents or remove the comment.
    /**
     * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires
     * the certificates/create permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate
     * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate}
     *
     * @param name The name of the certificate to be created.
     * @param policy The policy of the certificate to be created.
     * @param enabled The enabled status for the certificate.
     * @param tags The application specific metadata to set.
     * @throws ResourceModifiedException when invalid certificate policy configuration is provided.
     * @return A {@link PollerFlux} polling on the create certificate operation status.
     */
    public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) {
        // Polls every second: activation starts the create operation, poll checks its status,
        // cancel aborts it server-side, fetch retrieves the finished certificate.
        return new PollerFlux<>(Duration.ofSeconds(1),
            activationOperation(name, policy, enabled, tags),
            createPollOperation(name),
            cancelOperation(name),
            fetchResultOperation(name));
    }

    // Cancels the in-flight create operation on the service when the poller is cancelled.
    private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) {
        return (pollingContext, firstResponse) -> withContext(context ->
            cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono);
    }

    // Kicks off the create-certificate request; runs once when polling is activated.
    private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) {
        return (pollingContext) -> withContext(context -> createCertificateWithResponse(name, policy, enabled, tags, context)
            .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue())));
    }

    // Retrieves the created certificate (latest version) once the operation has completed.
    private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) {
        return (pollingContext) -> withContext(context -> getCertificateWithResponse(name, "", context)
            .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue())));
    }

    /**
     * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires
     * the certificates/create permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate
     * operation status.
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean 
enabled, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .certificateAttributes(new CertificateRequestAttributes().enabled(enabled)) .tags(tags); return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
     * Prints out the
     * returned certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse}
     *
     * @param name The name of the certificate to retrieve, cannot be null
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException if {@code name} is empty string.
     * @return A {@link Mono} containing a {@link Response} whose value contains the requested certificate.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) {
        try {
            // Empty version string selects the latest version of the certificate.
            return withContext(context -> getCertificateWithResponse(name, "", context));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    // Fetches the certificate including its policy; empty 'version' means latest.
    Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) {
        return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
            .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name))
            .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName()))
            .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error));
    }

    // Fetches a specific certificate version without its policy; empty 'version' means latest.
    Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) {
        return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
            .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name))
            .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName()))
            .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error));
    }

    /**
     * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Gets a specific version of the certificate in the key vault. Prints out the
     * returned certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse}
     *
     * @param name The name of the certificate to retrieve, cannot be null
     * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved.
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException if {@code name} is empty string.
     * @return A {@link Mono} containing a {@link Response} whose value contains the requested certificate version.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) {
        try {
            return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Gets a specific version of the certificate in the key vault. Prints out the
     * returned certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion}
     *
     * @param name The name of the certificate to retrieve, cannot be null
     * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved.
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException if {@code name} is empty string.
     * @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) {
        try {
            return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing
     * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the
     * returned certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties}
     *
     * @param certificateProperties The {@link CertificateProperties} object with updated properties.
     * @throws NullPointerException if {@code certificate} is {@code null}.
     * @throws ResourceNotFoundException when a certificate with the given name doesn't exist in the key vault.
     * @throws HttpRequestException if the certificate name is an empty string.
     * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) {
        try {
            return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Updates the specified attributes associated with the specified certificate.
The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) { Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null."); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateProperties.getTags()) .certificateAttributes(new CertificateRequestAttributes(certificateProperties)); return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", 
certificateProperties.getName())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error)); } /** * Gets information about the certificate which represents the {@link CertificateProperties} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateProperties} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateProperties}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param certificateProperties The {@link CertificateProperties} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties
Once the client is built and locked to an endpoint, there is no way to find out which vault URL it points to. If a user has multiple clients set up, this getter helps distinguish which vault each client targets. This change was also standardized across all languages.
public String getVaultUrl() { return vaultUrl; }
}
public String getVaultUrl() { return vaultUrl; }
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private final String vaultUrl; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param vaultUrl URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link CertificateServiceVersion} of the service to be used when making requests. */ CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param enabled The enabled status for the certificate. * @param tags The application specific metadata to set. 
* @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name, policy, enabled, tags), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) { return (pollingContext, firstResponse) -> withContext(context -> cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono); } private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return (pollingContext) -> withContext(context -> createCertificateWithResponse(name, policy, enabled, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue()))); } private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) { return (pollingContext) -> withContext(context -> getCertificateWithResponse(name, "", context) .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. 
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean 
enabled, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .certificateAttributes(new CertificateRequestAttributes().enabled(enabled)) .tags(tags); return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) { return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. 
This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. 
The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) { Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null."); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateProperties.getTags()) .certificateAttributes(new CertificateRequestAttributes(certificateProperties)); return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", 
certificateProperties.getName())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error)); } /** * Gets information about the certificate which represents the {@link CertificateProperties} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateProperties} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateProperties}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param certificateProperties The {@link CertificateProperties} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private final String vaultUrl; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param vaultUrl URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link CertificateServiceVersion} of the service to be used when making requests. */ CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param enabled The enabled status for the certificate. * @param tags The application specific metadata to set. 
* @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name, policy, enabled, tags), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) { return (pollingContext, firstResponse) -> withContext(context -> cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono); } private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return (pollingContext) -> withContext(context -> createCertificateWithResponse(name, policy, enabled, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue()))); } private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) { return (pollingContext) -> withContext(context -> getCertificateWithResponse(name, "", context) .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. 
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean 
enabled, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .certificateAttributes(new CertificateRequestAttributes().enabled(enabled)) .tags(tags); return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) { return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. 
This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. 
The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) { Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null."); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateProperties.getTags()) .certificateAttributes(new CertificateRequestAttributes(certificateProperties)); return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", 
certificateProperties.getName())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error)); } /** * Gets information about the certificate which represents the {@link CertificateProperties} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateProperties} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateProperties}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param certificateProperties The {@link CertificateProperties} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties
`getCertificateWithResponse(name, Context.NONE).getValue();`
/**
 * Retrieves the latest version of the named certificate, including its policy, by
 * delegating to the async client and blocking for the result.
 *
 * @param name The name of the certificate to retrieve; must not be null.
 * @return The requested {@link KeyVaultCertificateWithPolicy certificate}.
 */
public KeyVaultCertificateWithPolicy getCertificate(String name) {
    return client.getCertificate(name).block();
}
return client.getCertificate(name).block();
// Sync facade: blocks on the async client to fetch the latest certificate version with its policy.
public KeyVaultCertificateWithPolicy getCertificate(String name) { return client.getCertificate(name).block(); }
class CertificateClient { private final CertificateAsyncClient client; /** * Creates a CertificateClient that uses {@code pipeline} to service requests * * @param client The {@link CertificateAsyncClient} that the client routes its request through. */ CertificateClient(CertificateAsyncClient client) { this.client = client; } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ public String getVaultUrl() { return client.getVaultUrl(); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p>Create certificate is a long running operation. It indefinitely waits for the create certificate operation to complete on service side.</p> * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The createCertificate indefinitely waits for the operation to complete and * returns its last status. The details of the last certificate operation status are printed when a response is received</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link SyncPoller} polling on the create certificate operation status. */ @ServiceMethod(returns = ReturnType.SINGLE) public SyncPoller<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, Map<String, String> tags) { return client.beginCreateCertificate(name, policy, true, tags).getSyncPoller(); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. 
* * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The createCertificate indefinitely waits for the operation to complete and * returns its last status. The details of the last certificate operation status are printed when a response is received</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link SyncPoller} polling on the create certificate operation status. */ @ServiceMethod(returns = ReturnType.SINGLE) public SyncPoller<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return client.beginCreateCertificate(name, policy).getSyncPoller(); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return The requested {@link KeyVaultCertificateWithPolicy certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
Prints out the returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<KeyVaultCertificateWithPolicy> getCertificateWithResponse(String name) { return client.getCertificateWithResponse(name).block(); } /** * Gets information about the certificate which represents the {@link CertificateProperties} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateClient * the {@link PagedIterable} containing {@link CertificateProperties} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateProperties}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.getCertificate * * @param certificateProperties The {@link CertificateProperties} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties
class CertificateClient { private final CertificateAsyncClient client; /** * Creates a CertificateClient that uses {@code pipeline} to service requests * * @param client The {@link CertificateAsyncClient} that the client routes its request through. */ CertificateClient(CertificateAsyncClient client) { this.client = client; } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ public String getVaultUrl() { return client.getVaultUrl(); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p>Create certificate is a long running operation. It indefinitely waits for the create certificate operation to complete on service side.</p> * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The createCertificate indefinitely waits for the operation to complete and * returns its last status. The details of the last certificate operation status are printed when a response is received</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link SyncPoller} polling on the create certificate operation status. */ @ServiceMethod(returns = ReturnType.SINGLE) public SyncPoller<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, Map<String, String> tags) { return client.beginCreateCertificate(name, policy, true, tags).getSyncPoller(); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. 
* * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The createCertificate indefinitely waits for the operation to complete and * returns its last status. The details of the last certificate operation status are printed when a response is received</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link SyncPoller} polling on the create certificate operation status. */ @ServiceMethod(returns = ReturnType.SINGLE) public SyncPoller<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return client.beginCreateCertificate(name, policy).getSyncPoller(); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return The requested {@link KeyVaultCertificateWithPolicy certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
Prints out the returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<KeyVaultCertificateWithPolicy> getCertificateWithResponse(String name) { return client.getCertificateWithResponse(name).block(); } /** * Gets information about the certificate which represents the {@link CertificateProperties} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateClient * the {@link PagedIterable} containing {@link CertificateProperties} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateProperties}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.getCertificate * * @param certificateProperties The {@link CertificateProperties} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties
`return listPropertiesOfCertificates(false, Context.NONE)` ?
/**
 * Lists the properties of all certificates in the vault, excluding their full
 * content and policy. Wraps the async paged flux in a synchronous iterable.
 *
 * @return A {@link PagedIterable} of {@link CertificateProperties} for each certificate.
 */
public PagedIterable<CertificateProperties> listPropertiesOfCertificates() {
    // includePending = false: pending certificates are excluded from this listing.
    PagedFlux<CertificateProperties> pagedFlux = client.listPropertiesOfCertificates(false, Context.NONE);
    return new PagedIterable<>(pagedFlux);
}
return new PagedIterable<>(client.listPropertiesOfCertificates(false, Context.NONE));
// Sync facade over the async paged listing; 'false' excludes pending certificates — TODO confirm flag semantics against the async client.
public PagedIterable<CertificateProperties> listPropertiesOfCertificates() { return new PagedIterable<>(client.listPropertiesOfCertificates(false, Context.NONE)); }
class CertificateClient { private final CertificateAsyncClient client; /** * Creates a CertificateClient that uses {@code pipeline} to service requests * * @param client The {@link CertificateAsyncClient} that the client routes its request through. */ CertificateClient(CertificateAsyncClient client) { this.client = client; } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ public String getVaultUrl() { return client.getVaultUrl(); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p>Create certificate is a long running operation. It indefinitely waits for the create certificate operation to complete on service side.</p> * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The createCertificate indefinitely waits for the operation to complete and * returns its last status. The details of the last certificate operation status are printed when a response is received</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link SyncPoller} polling on the create certificate operation status. */ @ServiceMethod(returns = ReturnType.SINGLE) public SyncPoller<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, Map<String, String> tags) { return client.beginCreateCertificate(name, policy, true, tags).getSyncPoller(); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. 
* * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The createCertificate indefinitely waits for the operation to complete and * returns its last status. The details of the last certificate operation status are printed when a response is received</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link SyncPoller} polling on the create certificate operation status. */ @ServiceMethod(returns = ReturnType.SINGLE) public SyncPoller<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return client.beginCreateCertificate(name, policy).getSyncPoller(); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return The requested {@link KeyVaultCertificateWithPolicy certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public KeyVaultCertificateWithPolicy getCertificate(String name) { return client.getCertificate(name).block(); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. 
* * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<KeyVaultCertificateWithPolicy> getCertificateWithResponse(String name) { return client.getCertificateWithResponse(name).block(); } /** * Gets information about the certificate which represents the {@link CertificateProperties} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateClient * the {@link PagedIterable} containing {@link CertificateProperties} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateProperties}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.getCertificate * * @param certificateProperties The {@link CertificateProperties} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties
class CertificateClient { private final CertificateAsyncClient client; /** * Creates a CertificateClient that uses {@code pipeline} to service requests * * @param client The {@link CertificateAsyncClient} that the client routes its request through. */ CertificateClient(CertificateAsyncClient client) { this.client = client; } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ public String getVaultUrl() { return client.getVaultUrl(); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p>Create certificate is a long running operation. It indefinitely waits for the create certificate operation to complete on service side.</p> * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The createCertificate indefinitely waits for the operation to complete and * returns its last status. The details of the last certificate operation status are printed when a response is received</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link SyncPoller} polling on the create certificate operation status. */ @ServiceMethod(returns = ReturnType.SINGLE) public SyncPoller<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, Map<String, String> tags) { return client.beginCreateCertificate(name, policy, true, tags).getSyncPoller(); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. 
* * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The createCertificate indefinitely waits for the operation to complete and * returns its last status. The details of the last certificate operation status are printed when a response is received</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link SyncPoller} polling on the create certificate operation status. */ @ServiceMethod(returns = ReturnType.SINGLE) public SyncPoller<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return client.beginCreateCertificate(name, policy).getSyncPoller(); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return The requested {@link KeyVaultCertificateWithPolicy certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public KeyVaultCertificateWithPolicy getCertificate(String name) { return client.getCertificate(name).block(); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. 
* * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<KeyVaultCertificateWithPolicy> getCertificateWithResponse(String name) { return client.getCertificateWithResponse(name).block(); } /** * Gets information about the certificate which represents the {@link CertificateProperties} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateClient * the {@link PagedIterable} containing {@link CertificateProperties} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateProperties}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateClient.getCertificate * * @param certificateProperties The {@link CertificateProperties} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties
Given that `assertInBounds` is inclusive at the boundaries, should this use 1 instead of 0, since 0 bytes for a block doesn't make sense?
/**
 * Creates a new {@link ParallelTransferOptions}, validating the supplied values.
 * Any parameter may be null, in which case a service default is applied downstream.
 *
 * @param blockSize The size of each block to stage; must be between 1 and
 * {@link BlockBlobAsyncClient#MAX_STAGE_BLOCK_BYTES} inclusive when non-null.
 * @param numBuffers For buffered upload, the maximum number of buffers to allocate;
 * must be at least 2 when non-null.
 * @param progressReceiver Optional {@link ProgressReceiver} for transfer progress reporting.
 * @throws IllegalArgumentException if a non-null value is outside its allowed bounds.
 */
public ParallelTransferOptions(Integer blockSize, Integer numBuffers, ProgressReceiver progressReceiver) {
    if (blockSize != null) {
        // assertInBounds is inclusive at both ends; a 0-byte block is meaningless,
        // so the lower bound must be 1, not 0.
        StorageImplUtils.assertInBounds("blockSize", blockSize, 1, BlockBlobAsyncClient.MAX_STAGE_BLOCK_BYTES);
    }
    this.blockSize = blockSize;
    if (numBuffers != null) {
        // Buffered upload requires at least two buffers to overlap fill and drain.
        StorageImplUtils.assertInBounds("numBuffers", numBuffers, 2, Integer.MAX_VALUE);
    }
    this.numBuffers = numBuffers;
    this.progressReceiver = progressReceiver;
}
StorageImplUtils.assertInBounds("blockSize", blockSize, 0, BlockBlobAsyncClient.MAX_STAGE_BLOCK_BYTES);
// Validates optional transfer tuning parameters; assertInBounds is inclusive, so blockSize >= 1 and numBuffers >= 2.
public ParallelTransferOptions(Integer blockSize, Integer numBuffers, ProgressReceiver progressReceiver) { if (blockSize != null) { StorageImplUtils.assertInBounds("blockSize", blockSize, 1, BlockBlobAsyncClient.MAX_STAGE_BLOCK_BYTES); } this.blockSize = blockSize; if (numBuffers != null) { StorageImplUtils.assertInBounds("numBuffers", numBuffers, 2, Integer.MAX_VALUE); } this.numBuffers = numBuffers; this.progressReceiver = progressReceiver; }
class ParallelTransferOptions { private final Integer blockSize; private final Integer numBuffers; private final ProgressReceiver progressReceiver; /** * Creates a new {@link ParallelTransferOptions} with default parameters applied. * * @param blockSize The block size. * For upload, The block size is the size of each block that will be staged. This value also determines the number * of requests that need to be made. If block size is large, upload will make fewer network calls, but each * individual call will send more data and will therefore take longer. This parameter also determines the size * that each buffer uses when buffering is required and consequently amount of memory consumed by such methods may * be up to blockSize * numBuffers. * @param numBuffers For buffered upload only, the number of buffers is the maximum number of buffers this method * should allocate. Memory will be allocated lazily as needed. Must be at least two. Typically, the larger the * number of buffers, the more parallel, and thus faster, the upload portion of this operation will be. * The amount of memory consumed by methods using this value may be up to blockSize * numBuffers. * @param progressReceiver {@link ProgressReceiver} */ /** * Gets the block size (chunk size) to transfer at a time. * @return The block size. */ public Integer getBlockSize() { return this.blockSize; } /** * Gets the number of buffers being used for a transfer operation. * @return The number of buffers. */ public Integer getNumBuffers() { return this.numBuffers; } /** * Gets the Progress receiver for parallel reporting * @return the progress reporter */ public ProgressReceiver getProgressReceiver() { return this.progressReceiver; } }
class ParallelTransferOptions { private final Integer blockSize; private final Integer numBuffers; private final ProgressReceiver progressReceiver; /** * Creates a new {@link ParallelTransferOptions} with default parameters applied. * * @param blockSize The block size. * For upload, The block size is the size of each block that will be staged. This value also determines the number * of requests that need to be made. If block size is large, upload will make fewer network calls, but each * individual call will send more data and will therefore take longer. This parameter also determines the size * that each buffer uses when buffering is required and consequently amount of memory consumed by such methods may * be up to blockSize * numBuffers. * @param numBuffers For buffered upload only, the number of buffers is the maximum number of buffers this method * should allocate. Memory will be allocated lazily as needed. Must be at least two. Typically, the larger the * number of buffers, the more parallel, and thus faster, the upload portion of this operation will be. * The amount of memory consumed by methods using this value may be up to blockSize * numBuffers. * @param progressReceiver {@link ProgressReceiver} */ /** * Gets the block size (chunk size) to transfer at a time. * @return The block size. */ public Integer getBlockSize() { return this.blockSize; } /** * Gets the number of buffers being used for a transfer operation. * @return The number of buffers. */ public Integer getNumBuffers() { return this.numBuffers; } /** * Gets the Progress receiver for parallel reporting * @return the progress reporter */ public ProgressReceiver getProgressReceiver() { return this.progressReceiver; } }
This is a safeguard so that, if the operation fails, the user isn't left with a potentially corrupted file, correct? If so, would we want to log a message here stating that the operation wasn't able to complete successfully and that resources are being cleaned up?
/**
 * Closes the download file channel and, if the download did not complete
 * successfully, deletes the partially written file so the user is not left
 * with a corrupted download.
 *
 * @param channel The channel the download was being written to; closed unconditionally.
 * @param filePath Path of the destination file, deleted on failure/cancellation.
 * @param signalType The terminal signal; anything other than ON_COMPLETE triggers cleanup.
 * @throws UncheckedIOException if closing the channel or deleting the file fails.
 */
private void downloadToFileCleanup(AsynchronousFileChannel channel, String filePath, SignalType signalType) {
    try {
        channel.close();
        if (!signalType.equals(SignalType.ON_COMPLETE)) {
            Files.delete(Paths.get(filePath));
            // Surface that the download failed and the partial file was removed,
            // so the cleanup is observable rather than silent.
            logger.verbose("Downloading to file failed. Cleaning up resources.");
        }
    } catch (IOException e) {
        throw logger.logExceptionAsError(new UncheckedIOException(e));
    }
}
if (!signalType.equals(SignalType.ON_COMPLETE)) {
// Closes the channel; on any non-ON_COMPLETE terminal signal, deletes the partial file and logs the cleanup.
private void downloadToFileCleanup(AsynchronousFileChannel channel, String filePath, SignalType signalType) { try { channel.close(); if (!signalType.equals(SignalType.ON_COMPLETE)) { Files.delete(Paths.get(filePath)); logger.verbose("Downloading to file failed. Cleaning up resources."); } } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } }
/**
 * Base type for asynchronous blob clients. Holds the generated {@link AzureBlobStorageImpl}
 * used for all service calls plus the identity of the blob (account, container, name,
 * optional snapshot) and the optional customer-provided encryption key.
 */
class BlobAsyncClientBase {
    // Default chunk size (4 MB) used when splitting a download into parallel ranges.
    private static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB;
    // Upper bound (100 MB) on a single download chunk.
    private static final int BLOB_MAX_DOWNLOAD_BLOCK_SIZE = 100 * Constants.MB;
    private final ClientLogger logger = new ClientLogger(BlobAsyncClientBase.class);
    // Generated protocol layer; all REST calls in this class go through it.
    protected final AzureBlobStorageImpl azureBlobStorage;
    // Snapshot id; null means the client targets the base blob.
    private final String snapshot;
    // Customer-provided key; null means service-managed encryption.
    private final CpkInfo customerProvidedKey;
    protected final String accountName;
    protected final String containerName;
    protected final String blobName;
    protected final BlobServiceVersion serviceVersion;

    /**
     * Package-private constructor for use by {@link SpecializedBlobClientBuilder}.
     *
     * @param pipeline The pipeline used to send and receive service requests.
     * @param url The endpoint where to send service requests.
     * @param serviceVersion The version of the service to receive requests.
     * @param accountName The storage account name.
     * @param containerName The container name.
     * @param blobName The blob name.
     * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
     * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     */
    protected BlobAsyncClientBase(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion,
        String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey) {
        this.azureBlobStorage = new AzureBlobStorageBuilder()
            .pipeline(pipeline)
            .url(url)
            .version(serviceVersion.getVersion())
            .build();
        this.serviceVersion = serviceVersion;
        this.accountName = accountName;
        this.containerName = containerName;
        // Normalize to exactly one level of URL-encoding: decode first in case the caller
        // already passed an encoded name, then encode.
        this.blobName = Utility.urlEncode(Utility.urlDecode(blobName));
        this.snapshot = snapshot;
        this.customerProvidedKey = customerProvidedKey;
    }

    /**
     * Creates a new {@link BlobAsyncClientBase} linked to the {@code snapshot} of this blob resource.
* * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobAsyncClientBase} used to interact with the specific snapshot. */ public BlobAsyncClientBase getSnapshotClient(String snapshot) { return new BlobAsyncClientBase(getHttpPipeline(), getBlobUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), snapshot, getCustomerProvidedKey()); } /** * Gets the URL of the blob represented by this client. * * @return the URL. */ public String getBlobUrl() { if (!this.isSnapshot()) { return azureBlobStorage.getUrl(); } else { if (azureBlobStorage.getUrl().contains("?")) { return String.format("%s&snapshot=%s", azureBlobStorage.getUrl(), snapshot); } else { return String.format("%s?snapshot=%s", azureBlobStorage.getUrl(), snapshot); } } } /** * Get the container name. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getContainerName} * * @return The name of the container. */ public final String getContainerName() { return containerName; } /** * Decodes and gets the blob name. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getBlobName} * * @return The decoded name of the blob. */ public final String getBlobName() { return (blobName == null) ? null : Utility.urlDecode(blobName); } /** * Gets the {@link HttpPipeline} powering this client. * * @return The pipeline. */ public HttpPipeline getHttpPipeline() { return azureBlobStorage.getHttpPipeline(); } /** * Gets the {@link CpkInfo} used to encrypt this blob's content on the server. * * @return the customer provided key used for encryption. */ public CpkInfo getCustomerProvidedKey() { return customerProvidedKey; } /** * Get associated account name. * * @return account name associated with this storage resource. */ public String getAccountName() { return accountName; } /** * Gets the service version the client is using. 
* * @return the service version the client is using. */ public BlobServiceVersion getServiceVersion() { return serviceVersion; } /** * Gets the snapshotId for a blob resource * * @return A string that represents the snapshotId of the snapshot blob */ public String getSnapshotId() { return this.snapshot; } /** * Determines if a blob is a snapshot * * @return A boolean that indicates if a blob is a snapshot */ public boolean isSnapshot() { return this.snapshot != null; } /** * Determines if the blob this client represents exists in the cloud. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.exists} * * @return true if the blob exists, false if it doesn't */ public Mono<Boolean> exists() { try { return existsWithResponse().flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Determines if the blob this client represents exists in the cloud. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.existsWithResponse} * * @return true if the blob exists, false if it doesn't */ public Mono<Response<Boolean>> existsWithResponse() { try { return withContext(this::existsWithResponse); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Boolean>> existsWithResponse(Context context) { return this.getPropertiesWithResponse(null, context) .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true)) .onErrorResume(t -> t instanceof BlobStorageException && ((BlobStorageException) t).getStatusCode() == 404, t -> { HttpResponse response = ((BlobStorageException) t).getResponse(); return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), false)); }); } /** * Copies the data at the source URL to a blob. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second * is used. * @return A {@link PollerFlux} that polls the blob copy operation until it has completed, has failed, or has been * cancelled. */ public PollerFlux<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) { return beginCopy(sourceUrl, null, null, null, null, null, pollInterval); } /** * Copies the data at the source URL to a blob. * * <p><strong>Starting a copy operation</strong></p> * Starting a copy operation and polling on the responses. * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopy * * <p><strong>Cancelling a copy operation</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopyFromUrlCancel * * <p>For more information, see the * <a href="https: * * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata Metadata to associate with the destination blob. * @param tier {@link AccessTier} for the destination blob. * @param priority {@link RehydratePriority} for rehydrating the blob. * @param sourceModifiedAccessConditions {@link RequestConditions} against the source. Standard HTTP * Access conditions related to the modification of data. ETag and LastModifiedTime are used to construct * conditions related to when the blob was changed relative to the given request. The request will fail if the * specified condition is not satisfied. * @param destAccessConditions {@link BlobRequestConditions} against the destination. * @param pollInterval Duration between each poll for the copy status. 
If none is specified, a default of one second * is used. * @return A {@link PollerFlux} that polls the blob copy operation until it has completed, has failed, or has been * cancelled. */ public PollerFlux<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier, RehydratePriority priority, RequestConditions sourceModifiedAccessConditions, BlobRequestConditions destAccessConditions, Duration pollInterval) { final Duration interval = pollInterval != null ? pollInterval : Duration.ofSeconds(1); final RequestConditions sourceModifiedCondition = sourceModifiedAccessConditions == null ? new RequestConditions() : sourceModifiedAccessConditions; final BlobRequestConditions destinationAccessConditions = destAccessConditions == null ? new BlobRequestConditions() : destAccessConditions; final RequestConditions sourceConditions = new RequestConditions() .setIfModifiedSince(sourceModifiedCondition.getIfModifiedSince()) .setIfUnmodifiedSince(sourceModifiedCondition.getIfUnmodifiedSince()) .setIfMatch(sourceModifiedCondition.getIfMatch()) .setIfNoneMatch(sourceModifiedCondition.getIfNoneMatch()); return new PollerFlux<>(interval, (pollingContext) -> { try { return onStart(sourceUrl, metadata, tier, priority, sourceConditions, destinationAccessConditions); } catch (RuntimeException ex) { return monoError(logger, ex); } }, (pollingContext) -> { try { return onPoll(pollingContext.getLatestResponse()); } catch (RuntimeException ex) { return monoError(logger, ex); } }, (pollingContext, firstResponse) -> { if (firstResponse == null || firstResponse.getValue() == null) { return Mono.error(logger.logExceptionAsError( new IllegalArgumentException("Cannot cancel a poll response that never started."))); } final String copyIdentifier = firstResponse.getValue().getCopyId(); if (!ImplUtils.isNullOrEmpty(copyIdentifier)) { logger.info("Cancelling copy operation for copy id: {}", copyIdentifier); return 
abortCopyFromUrl(copyIdentifier).thenReturn(firstResponse.getValue()); } return Mono.empty(); }, (pollingContext) -> Mono.empty()); } private Mono<BlobCopyInfo> onStart(String sourceUrl, Map<String, String> metadata, AccessTier tier, RehydratePriority priority, RequestConditions sourceModifiedAccessConditions, BlobRequestConditions destinationAccessConditions) { URL url; try { url = new URL(sourceUrl); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url.", ex)); } return withContext( context -> azureBlobStorage.blobs().startCopyFromURLWithRestResponseAsync(null, null, url, null, metadata, tier, priority, sourceModifiedAccessConditions.getIfModifiedSince(), sourceModifiedAccessConditions.getIfUnmodifiedSince(), sourceModifiedAccessConditions.getIfMatch(), sourceModifiedAccessConditions.getIfNoneMatch(), destinationAccessConditions.getIfModifiedSince(), destinationAccessConditions.getIfUnmodifiedSince(), destinationAccessConditions.getIfMatch(), destinationAccessConditions.getIfNoneMatch(), destinationAccessConditions.getLeaseId(), null, context)) .map(response -> { final BlobStartCopyFromURLHeaders headers = response.getDeserializedHeaders(); return new BlobCopyInfo(sourceUrl, headers.getCopyId(), headers.getCopyStatus(), headers.getETag(), headers.getLastModified(), headers.getErrorCode()); }); } private Mono<PollResponse<BlobCopyInfo>> onPoll(PollResponse<BlobCopyInfo> pollResponse) { if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final BlobCopyInfo lastInfo = pollResponse.getValue(); if (lastInfo == null) { logger.warning("BlobCopyInfo does not exist. 
Activation operation failed."); return Mono.just(new PollResponse<>( LongRunningOperationStatus.fromString("COPY_START_FAILED", true), null)); } return getProperties().map(response -> { final CopyStatusType status = response.getCopyStatus(); final BlobCopyInfo result = new BlobCopyInfo(response.getCopySource(), response.getCopyId(), status, response.getETag(), response.getCopyCompletionTime(), response.getCopyStatusDescription()); LongRunningOperationStatus operationStatus; switch (status) { case SUCCESS: operationStatus = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case FAILED: operationStatus = LongRunningOperationStatus.FAILED; break; case ABORTED: operationStatus = LongRunningOperationStatus.USER_CANCELLED; break; case PENDING: operationStatus = LongRunningOperationStatus.IN_PROGRESS; break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "CopyStatusType is not supported. Status: " + status)); } return new PollResponse<>(operationStatus, result); }).onErrorReturn( new PollResponse<>(LongRunningOperationStatus.fromString("POLLING_FAILED", true), lastInfo)); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.abortCopyFromUrl * * <p>For more information, see the * <a href="https: * * @see * @see * @see * @param copyId The id of the copy operation to abort. * @return A reactive response signalling completion. */ public Mono<Void> abortCopyFromUrl(String copyId) { try { return abortCopyFromUrlWithResponse(copyId, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.abortCopyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @see * @see * @see * @param copyId The id of the copy operation to abort. * @param leaseId The lease ID the active lease on the blob must match. * @return A reactive response signalling completion. */ public Mono<Response<Void>> abortCopyFromUrlWithResponse(String copyId, String leaseId) { try { return withContext(context -> abortCopyFromUrlWithResponse(copyId, leaseId, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> abortCopyFromUrlWithResponse(String copyId, String leaseId, Context context) { return this.azureBlobStorage.blobs().abortCopyFromURLWithRestResponseAsync( null, null, copyId, null, leaseId, null, context) .map(response -> new SimpleResponse<>(response, null)); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.copyFromUrl * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. * @return A reactive response containing the copy ID for the long running operation. */ public Mono<String> copyFromUrl(String copySource) { try { return copyFromUrlWithResponse(copySource, null, null, null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.copyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. 
URLs outside of Azure may only be copied to block blobs. * @param metadata Metadata to associate with the destination blob. * @param tier {@link AccessTier} for the destination blob. * @param sourceModifiedAccessConditions {@link RequestConditions} against the source. Standard HTTP Access * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. The request will fail if the specified * condition is not satisfied. * @param destAccessConditions {@link BlobRequestConditions} against the destination. * @return A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier, RequestConditions sourceModifiedAccessConditions, BlobRequestConditions destAccessConditions) { try { return withContext(context -> copyFromUrlWithResponse(copySource, metadata, tier, sourceModifiedAccessConditions, destAccessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<String>> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier, RequestConditions sourceModifiedAccessConditions, BlobRequestConditions destAccessConditions, Context context) { sourceModifiedAccessConditions = sourceModifiedAccessConditions == null ? new RequestConditions() : sourceModifiedAccessConditions; destAccessConditions = destAccessConditions == null ? 
new BlobRequestConditions() : destAccessConditions; URL url; try { url = new URL(copySource); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalArgumentException("'copySource' is not a valid url.")); } return this.azureBlobStorage.blobs().copyFromURLWithRestResponseAsync( null, null, url, null, metadata, tier, sourceModifiedAccessConditions.getIfModifiedSince(), sourceModifiedAccessConditions.getIfUnmodifiedSince(), sourceModifiedAccessConditions.getIfMatch(), sourceModifiedAccessConditions.getIfNoneMatch(), destAccessConditions.getIfModifiedSince(), destAccessConditions.getIfUnmodifiedSince(), destAccessConditions.getIfMatch(), destAccessConditions.getIfNoneMatch(), destAccessConditions.getLeaseId(), null, context) .map(rb -> new SimpleResponse<>(rb, rb.getDeserializedHeaders().getCopyId())); } /** * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or * {@link AppendBlobClient}. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.download} * * <p>For more information, see the * <a href="https: * * @return A reactive response containing the blob data. */ public Flux<ByteBuffer> download() { try { return downloadWithResponse(null, null, null, false) .flatMapMany(BlobDownloadAsyncResponse::getValue); } catch (RuntimeException ex) { return fluxError(logger, ex); } } /** * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link * PageBlobClient}, or {@link AppendBlobClient}. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadWithResponse * * <p>For more information, see the * <a href="https: * * @param range {@link BlobRange} * @param options {@link DownloadRetryOptions} * @param accessConditions {@link BlobRequestConditions} * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned. * @return A reactive response containing the blob data. */ public Mono<BlobDownloadAsyncResponse> downloadWithResponse(BlobRange range, DownloadRetryOptions options, BlobRequestConditions accessConditions, boolean rangeGetContentMD5) { try { return withContext(context -> downloadWithResponse(range, options, accessConditions, rangeGetContentMD5, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<BlobDownloadAsyncResponse> downloadWithResponse(BlobRange range, DownloadRetryOptions options, BlobRequestConditions accessConditions, boolean rangeGetContentMD5, Context context) { return downloadHelper(range, options, accessConditions, rangeGetContentMD5, context) .map(response -> new BlobDownloadAsyncResponse(response.getRequest(), response.getStatusCode(), response.getHeaders(), response.getValue(), response.getDeserializedHeaders())); } private Mono<ReliableDownload> downloadHelper(BlobRange range, DownloadRetryOptions options, BlobRequestConditions accessConditions, boolean rangeGetContentMd5, Context context) { range = range == null ? new BlobRange(0) : range; Boolean getMD5 = rangeGetContentMd5 ? rangeGetContentMd5 : null; accessConditions = accessConditions == null ? 
new BlobRequestConditions() : accessConditions; HttpGetterInfo info = new HttpGetterInfo() .setOffset(range.getOffset()) .setCount(range.getCount()) .setETag(accessConditions.getIfMatch()); return azureBlobStorage.blobs().downloadWithRestResponseAsync(null, null, snapshot, null, range.toHeaderValue(), accessConditions.getLeaseId(), getMD5, null, accessConditions.getIfModifiedSince(), accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(), null, customerProvidedKey, context) .map(response -> { info.setETag(response.getDeserializedHeaders().getETag()); return new ReliableDownload(response, options, info, updatedInfo -> downloadHelper(new BlobRange(updatedInfo.getOffset(), updatedInfo.getCount()), options, new BlobRequestConditions().setIfMatch(info.getETag()), false, context)); }); } /** * Downloads the entire blob into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p>Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link * AppendBlobClient}.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadToFile * * <p>For more information, see the * <a href="https: * * @param filePath A non-null {@link OutputStream} instance where the downloaded data will be written. * @return A reactive response containing the blob properties and metadata. */ public Mono<BlobProperties> downloadToFile(String filePath) { try { return downloadToFileWithResponse(filePath, null, null, null, null, false).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Downloads the entire blob into a file specified by the path. 
* * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p>Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link * AppendBlobClient}.</p> * * <p>This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra * call, provide the {@link BlobRange} parameter.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param filePath A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range {@link BlobRange} * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel * transfers parameter is ignored. * @param options {@link DownloadRetryOptions} * @param requestConditions {@link BlobRequestConditions} * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned. * @return A reactive response containing the blob properties and metadata. * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB. * @throws UncheckedIOException If an I/O error occurs. 
*/ public Mono<Response<BlobProperties>> downloadToFileWithResponse(String filePath, BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean rangeGetContentMd5) { try { return withContext(context -> downloadToFileWithResponse(filePath, range, parallelTransferOptions, options, requestConditions, rangeGetContentMd5, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<BlobProperties>> downloadToFileWithResponse(String filePath, BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) { BlobRange rangeReal = range == null ? new BlobRange(0) : range; final ParallelTransferOptions finalParallelTransferOptions = ModelHelper.populateAndApplyDefaults(parallelTransferOptions); BlobRequestConditions conditionsReal = requestConditions == null ? 
new BlobRequestConditions() : requestConditions; AsynchronousFileChannel channel = downloadToFileResourceSupplier(filePath); return Mono.just(channel) .flatMap(c -> this.downloadToFileImpl(c, rangeReal, finalParallelTransferOptions, downloadRetryOptions, conditionsReal, rangeGetContentMd5, context)) .doFinally(signalType -> this.downloadToFileCleanup(channel, filePath, signalType)); } private AsynchronousFileChannel downloadToFileResourceSupplier(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Mono<Response<BlobProperties>> downloadToFileImpl(AsynchronousFileChannel file, BlobRange rangeReal, ParallelTransferOptions finalParallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) { Lock progressLock = new ReentrantLock(); AtomicLong totalProgress = new AtomicLong(0); /* * Downloads the first chunk and gets the size of the data and etag if not specified by the user. */ return getSetupMono(rangeReal, finalParallelTransferOptions, downloadRetryOptions, requestConditions, rangeGetContentMd5, context) .flatMap(setupTuple3 -> { long newCount = setupTuple3.getT1(); BlobRequestConditions realConditions = setupTuple3.getT2(); int numChunks = calculateNumBlocks(newCount, finalParallelTransferOptions.getBlockSize()); numChunks = numChunks == 0 ? 
1 : numChunks; BlobDownloadAsyncResponse initialResponse = setupTuple3.getT3(); return Flux.range(0, numChunks) .flatMap(chunkNum -> { if (chunkNum == 0) { return writeBodyToFile(initialResponse, file, 0, finalParallelTransferOptions, progressLock, totalProgress); } long chunkSizeActual = Math.min(finalParallelTransferOptions.getBlockSize(), newCount - (chunkNum * finalParallelTransferOptions.getBlockSize())); BlobRange chunkRange = new BlobRange( rangeReal.getOffset() + (chunkNum * finalParallelTransferOptions.getBlockSize()), chunkSizeActual); return this.downloadWithResponse(chunkRange, downloadRetryOptions, realConditions, rangeGetContentMd5, null) .subscribeOn(Schedulers.elastic()) .flatMap(response -> writeBodyToFile(response, file, chunkNum, finalParallelTransferOptions, progressLock, totalProgress)); }) .last(); }); } private int calculateNumBlocks(long dataSize, long blockLength) { int numBlocks = toIntExact(dataSize / blockLength); if (dataSize % blockLength != 0) { numBlocks++; } return numBlocks; } /* Download the first chunk. Construct a Mono which will emit the total count for calculating the number of chunks, access conditions containing the etag to lock on, and the response from downloading the first chunk. */ private Mono<Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse>> getSetupMono(BlobRange r, ParallelTransferOptions o, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) { long initialChunkSize = r.getCount() != null && r.getCount() < o.getBlockSize() ? r.getCount() : o.getBlockSize(); return this.downloadWithResponse(new BlobRange(r.getOffset(), initialChunkSize), downloadRetryOptions, requestConditions, rangeGetContentMd5, context) .subscribeOn(Schedulers.elastic()) .flatMap(response -> { /* Either the etag was set and it matches because the download succeeded, so this is a no-op, or there was no etag, so we set it here. 
ETag locking is vital to ensure we download one, consistent view of the file. */ BlobRequestConditions newConditions = setEtag(requestConditions, response.getHeaders() .getValue("ETag")); long totalLength = extractTotalBlobLength(response.getHeaders().getValue("Content-Range")); /* If the user either didn't specify a count or they specified a count greater than the size of the remaining data, take the size of the remaining data. This is to prevent the case where the count is much much larger than the size of the blob and we could try to download at an invalid offset. */ long newCount = r.getCount() == null || r.getCount() > (totalLength - r.getOffset()) ? totalLength - r.getOffset() : r.getCount(); return Mono.zip(Mono.just(newCount), Mono.just(newConditions), Mono.just(response)); }) .onErrorResume(throwable -> { /* In the case of an empty blob, we still want to report success and give back valid headers. Attempting a range download on an empty blob will return an InvalidRange error code and a Content-Range header of the format "bytes * /0". We need to double check that the total size is zero in the case that the customer has attempted an invalid range on a non-zero length blob. */ if (throwable instanceof BlobStorageException && ((BlobStorageException) throwable).getErrorCode() == BlobErrorCode.INVALID_RANGE && extractTotalBlobLength(((BlobStorageException) throwable).getResponse() .getHeaders().getValue("Content-Range")) == 0) { return this.downloadWithResponse(new BlobRange(0, 0L), downloadRetryOptions, requestConditions, rangeGetContentMd5, context) .subscribeOn(Schedulers.elastic()) .flatMap(response -> { /* Ensure the blob is still 0 length by checking our download was the full length. (200 is for full blob; 206 is partial). */ if (response.getStatusCode() != 200) { Mono.error(new IllegalStateException("Blob was modified mid download. 
It was " + "originally 0 bytes and is now larger.")); } return Mono.zip(Mono.just(0L), Mono.just(requestConditions), Mono.just(response)); }); } return Mono.error(throwable); }); } private static BlobRequestConditions setEtag(BlobRequestConditions accessConditions, String etag) { return new BlobRequestConditions() .setIfModifiedSince( accessConditions.getIfModifiedSince()) .setIfUnmodifiedSince( accessConditions.getIfModifiedSince()) .setIfMatch(etag) .setIfNoneMatch( accessConditions.getIfNoneMatch()) .setLeaseId(accessConditions.getLeaseId()); } private static Mono<Response<BlobProperties>> writeBodyToFile(BlobDownloadAsyncResponse response, AsynchronousFileChannel file, long chunkNum, ParallelTransferOptions optionsReal, Lock progressLock, AtomicLong totalProgress) { Flux<ByteBuffer> data = response.getValue(); data = ProgressReporter.addParallelProgressReporting(data, optionsReal.getProgressReceiver(), progressLock, totalProgress); BlobProperties properties = new BlobProperties(null, response.getDeserializedHeaders().getLastModified(), response.getDeserializedHeaders().getETag(), response.getDeserializedHeaders().getContentLength() == null ? 
0 : response.getDeserializedHeaders().getContentLength(), response.getDeserializedHeaders().getContentType(), null, response.getDeserializedHeaders().getContentEncoding(), response.getDeserializedHeaders().getContentDisposition(), response.getDeserializedHeaders().getContentLanguage(), response.getDeserializedHeaders().getCacheControl(), response.getDeserializedHeaders().getBlobSequenceNumber(), response.getDeserializedHeaders().getBlobType(), response.getDeserializedHeaders().getLeaseStatus(), response.getDeserializedHeaders().getLeaseState(), response.getDeserializedHeaders().getLeaseDuration(), response.getDeserializedHeaders().getCopyId(), response.getDeserializedHeaders().getCopyStatus(), response.getDeserializedHeaders().getCopySource(), response.getDeserializedHeaders().getCopyProgress(), response.getDeserializedHeaders().getCopyCompletionTime(), response.getDeserializedHeaders().getCopyStatusDescription(), response.getDeserializedHeaders().isServerEncrypted(), null, null, null, null, null, response.getDeserializedHeaders().getEncryptionKeySha256(), null, response.getDeserializedHeaders().getMetadata(), response.getDeserializedHeaders().getBlobCommittedBlockCount()); return FluxUtil.writeFile(data, file, chunkNum * optionsReal.getBlockSize()) /* Satisfy the return type. We want to eventually give the user back the headers. */ .then(Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), properties))); } private static long extractTotalBlobLength(String contentRange) { return Long.parseLong(contentRange.split("/")[1]); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.delete} * * <p>For more information, see the * <a href="https: * * @return A reactive response signalling completion. 
*/
public Mono<Void> delete() {
    try {
        // Delegate to the response-returning overload with default (null) options/conditions.
        return deleteWithResponse(null, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        // Surface synchronous construction failures through the reactive pipeline.
        return monoError(logger, ex);
    }
}

/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.deleteWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param accessConditions {@link BlobRequestConditions}
 * @return A reactive response signalling completion.
 */
public Mono<Response<Void>> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions accessConditions) {
    try {
        return withContext(context -> deleteWithResponse(deleteBlobSnapshotOptions, accessConditions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Context-aware implementation shared by the public overloads; normalizes null access conditions.
Mono<Response<Void>> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions accessConditions, Context context) {
    accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions;
    return this.azureBlobStorage.blobs().deleteWithRestResponseAsync(null, null, snapshot, null,
        accessConditions.getLeaseId(), deleteBlobSnapshotOptions, accessConditions.getIfModifiedSince(),
        accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
        accessConditions.getIfNoneMatch(), null, context)
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Returns the blob's metadata and properties.
*
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getProperties}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return A reactive response containing the blob properties and metadata.
 */
public Mono<BlobProperties> getProperties() {
    try {
        // Delegate to the response-returning overload with no access conditions.
        return getPropertiesWithResponse(null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getPropertiesWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param accessConditions {@link BlobRequestConditions}
 * @return A reactive response containing the blob properties and metadata.
 */
public Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobRequestConditions accessConditions) {
    try {
        return withContext(context -> getPropertiesWithResponse(accessConditions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Context-aware implementation; maps the raw HEAD-style response headers into BlobProperties.
Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobRequestConditions accessConditions, Context context) {
    accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions;
    return this.azureBlobStorage.blobs().getPropertiesWithRestResponseAsync(
        null, null, snapshot, null, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(),
        accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
        accessConditions.getIfNoneMatch(), null, customerProvidedKey, context)
        .map(rb -> {
            BlobGetPropertiesHeaders hd = rb.getDeserializedHeaders();
            // Missing Content-Length is normalized to 0 rather than a null Long.
            BlobProperties properties = new BlobProperties(hd.getCreationTime(), hd.getLastModified(),
                hd.getETag(), hd.getContentLength() == null ?
0 : hd.getContentLength(), hd.getContentType(),
    hd.getContentMD5(), hd.getContentEncoding(), hd.getContentDisposition(), hd.getContentLanguage(),
    hd.getCacheControl(), hd.getBlobSequenceNumber(), hd.getBlobType(), hd.getLeaseStatus(),
    hd.getLeaseState(), hd.getLeaseDuration(), hd.getCopyId(), hd.getCopyStatus(), hd.getCopySource(),
    hd.getCopyProgress(), hd.getCopyCompletionTime(), hd.getCopyStatusDescription(),
    hd.isServerEncrypted(), hd.isIncrementalCopy(), hd.getDestinationSnapshot(),
    AccessTier.fromString(hd.getAccessTier()), hd.isAccessTierInferred(),
    ArchiveStatus.fromString(hd.getArchiveStatus()), hd.getEncryptionKeySha256(),
    hd.getAccessTierChangeTime(), hd.getMetadata(), hd.getBlobCommittedBlockCount());
            return new SimpleResponse<>(rb, properties);
        });
}

/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setHttpHeaders
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param headers {@link BlobHttpHeaders}
 * @return A reactive response signalling completion.
 */
public Mono<Void> setHttpHeaders(BlobHttpHeaders headers) {
    try {
        // Delegate to the response-returning overload with no access conditions.
        return setHttpHeadersWithResponse(headers, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
*
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setHttpHeadersWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param headers {@link BlobHttpHeaders}
 * @param accessConditions {@link BlobRequestConditions}
 * @return A reactive response signalling completion.
 */
public Mono<Response<Void>> setHttpHeadersWithResponse(BlobHttpHeaders headers,
    BlobRequestConditions accessConditions) {
    try {
        return withContext(context -> setHttpHeadersWithResponse(headers, accessConditions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Context-aware implementation; normalizes null access conditions before the REST call.
Mono<Response<Void>> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions accessConditions,
    Context context) {
    accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions;
    return this.azureBlobStorage.blobs().setHTTPHeadersWithRestResponseAsync(
        null, null, null, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(),
        accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
        accessConditions.getIfNoneMatch(), null, headers, context)
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setMetadata
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Metadata to associate with the blob.
 * @return A reactive response signalling completion.
 */
public Mono<Void> setMetadata(Map<String, String> metadata) {
    try {
        // Delegate to the response-returning overload with no access conditions.
        return setMetadataWithResponse(metadata, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Changes a blob's metadata.
The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setMetadataWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Metadata to associate with the blob.
 * @param accessConditions {@link BlobRequestConditions}
 * @return A reactive response signalling completion.
 */
public Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata,
    BlobRequestConditions accessConditions) {
    try {
        return withContext(context -> setMetadataWithResponse(metadata, accessConditions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Context-aware implementation; note the customer-provided key is forwarded because metadata
// writes on an encrypted blob require it.
Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions accessConditions,
    Context context) {
    accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions;
    return this.azureBlobStorage.blobs().setMetadataWithRestResponseAsync(
        null, null, null, metadata, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(),
        accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
        accessConditions.getIfNoneMatch(), null, customerProvidedKey, context)
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Creates a read-only snapshot of the blob.
*
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.createSnapshot}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return A response containing a {@link BlobAsyncClientBase} which is used to interact with the created snapshot,
 * use {@link
 */
public Mono<BlobAsyncClientBase> createSnapshot() {
    try {
        // Delegate to the response-returning overload with no metadata or access conditions.
        return createSnapshotWithResponse(null, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.createSnapshotWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Metadata to associate with the blob snapshot.
 * @param accessConditions {@link BlobRequestConditions}
 * @return A response containing a {@link BlobAsyncClientBase} which is used to interact with the created snapshot,
 * use {@link
 */
public Mono<Response<BlobAsyncClientBase>> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions accessConditions) {
    try {
        return withContext(context -> createSnapshotWithResponse(metadata, accessConditions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Context-aware implementation; returns a client scoped to the newly created snapshot id.
Mono<Response<BlobAsyncClientBase>> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions accessConditions, Context context) {
    accessConditions = accessConditions == null ?
new BlobRequestConditions() : accessConditions;
    return this.azureBlobStorage.blobs().createSnapshotWithRestResponseAsync(
        null, null, null, metadata, accessConditions.getIfModifiedSince(),
        accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
        accessConditions.getIfNoneMatch(), accessConditions.getLeaseId(), null, customerProvidedKey, context)
        // Wrap the returned snapshot id in a client pointed at that snapshot.
        .map(rb -> new SimpleResponse<>(rb, this.getSnapshotClient(rb.getDeserializedHeaders().getSnapshot())));
}

/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setAccessTier
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param tier The new tier for the blob.
 * @return A reactive response signalling completion.
 * @throws NullPointerException if {@code tier} is null.
 */
public Mono<Void> setAccessTier(AccessTier tier) {
    try {
        // Delegate to the response-returning overload with no priority or lease id.
        return setAccessTierWithResponse(tier, null, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setAccessTierWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @return A reactive response signalling completion.
 * @throws NullPointerException if {@code tier} is null.
 */
public Mono<Response<Void>> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId) {
    try {
        return withContext(context -> setTierWithResponse(tier, priority, leaseId, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Context-aware implementation; tier is mandatory and validated up front.
Mono<Response<Void>> setTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Context context) {
    StorageImplUtils.assertNotNull("tier", tier);
    return this.azureBlobStorage.blobs().setTierWithRestResponseAsync(
        null, null, tier, null, priority, null, leaseId, context)
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.undelete}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return A reactive response signalling completion.
 */
public Mono<Void> undelete() {
    try {
        // Delegate to the response-returning overload.
        return undeleteWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.undeleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return A reactive response signalling completion.
*/
public Mono<Response<Void>> undeleteWithResponse() {
    try {
        return withContext(this::undeleteWithResponse);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Context-aware implementation of undelete; no options are exposed by the service call here.
Mono<Response<Void>> undeleteWithResponse(Context context) {
    return this.azureBlobStorage.blobs().undeleteWithRestResponseAsync(null, null,
        context).map(response -> new SimpleResponse<>(response, null));
}

/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getAccountInfo}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return a reactor response containing the sku name and account kind.
 */
public Mono<StorageAccountInfo> getAccountInfo() {
    try {
        // Delegate to the response-returning overload.
        return getAccountInfoWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getAccountInfoWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return a reactor response containing the sku name and account kind.
 */
public Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse() {
    try {
        return withContext(this::getAccountInfoWithResponse);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Context-aware implementation; maps the account-info headers into a StorageAccountInfo.
Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse(Context context) {
    return this.azureBlobStorage.blobs().getAccountInfoWithRestResponseAsync(null, null, context)
        .map(rb -> {
            BlobGetAccountInfoHeaders hd = rb.getDeserializedHeaders();
            return new SimpleResponse<>(rb, new StorageAccountInfo(hd.getSkuName(), hd.getAccountKind()));
        });
}
}
class BlobAsyncClientBase {
    // Default chunk size used when splitting a parallel download into block-sized ranges.
    private static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB;
    // Upper bound on a single download chunk.
    private static final int BLOB_MAX_DOWNLOAD_BLOCK_SIZE = 100 * Constants.MB;

    private final ClientLogger logger = new ClientLogger(BlobAsyncClientBase.class);

    protected final AzureBlobStorageImpl azureBlobStorage;
    // Snapshot identifier this client is scoped to; null means the base blob.
    private final String snapshot;
    // Customer-provided encryption key; null means service-managed encryption.
    private final CpkInfo customerProvidedKey;
    protected final String accountName;
    protected final String containerName;
    protected final String blobName;
    protected final BlobServiceVersion serviceVersion;

    /**
     * Package-private constructor for use by {@link SpecializedBlobClientBuilder}.
     *
     * @param pipeline The pipeline used to send and receive service requests.
     * @param url The endpoint where to send service requests.
     * @param serviceVersion The version of the service to receive requests.
     * @param accountName The storage account name.
     * @param containerName The container name.
     * @param blobName The blob name.
     * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
     * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     */
    protected BlobAsyncClientBase(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion,
        String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey) {
        this.azureBlobStorage = new AzureBlobStorageBuilder()
            .pipeline(pipeline)
            .url(url)
            .version(serviceVersion.getVersion())
            .build();
        this.serviceVersion = serviceVersion;
        this.accountName = accountName;
        this.containerName = containerName;
        // Decode-then-encode normalizes the name whether the caller passed it encoded or not.
        this.blobName = Utility.urlEncode(Utility.urlDecode(blobName));
        this.snapshot = snapshot;
        this.customerProvidedKey = customerProvidedKey;
    }

    /**
     * Creates a new {@link BlobAsyncClientBase} linked to the {@code snapshot} of this blob resource.
*
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobAsyncClientBase} used to interact with the specific snapshot.
 */
public BlobAsyncClientBase getSnapshotClient(String snapshot) {
    return new BlobAsyncClientBase(getHttpPipeline(), getBlobUrl(), getServiceVersion(), getAccountName(),
        getContainerName(), getBlobName(), snapshot, getCustomerProvidedKey());
}

/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    if (!this.isSnapshot()) {
        return azureBlobStorage.getUrl();
    } else {
        // Append the snapshot id as a query parameter, respecting any existing query string.
        if (azureBlobStorage.getUrl().contains("?")) {
            return String.format("%s&snapshot=%s", azureBlobStorage.getUrl(), snapshot);
        } else {
            return String.format("%s?snapshot=%s", azureBlobStorage.getUrl(), snapshot);
        }
    }
}

/**
 * Get the container name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    return containerName;
}

/**
 * Decodes and gets the blob name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    // blobName is stored URL-encoded (see constructor); decode it for the caller.
    return (blobName == null) ? null : Utility.urlDecode(blobName);
}

/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return azureBlobStorage.getHttpPipeline();
}

/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    return customerProvidedKey;
}

/**
 * Get associated account name.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    return accountName;
}

/**
 * Gets the service version the client is using.
*
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    return serviceVersion;
}

/**
 * Gets the snapshotId for a blob resource
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    return this.snapshot;
}

/**
 * Determines if a blob is a snapshot
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    return this.snapshot != null;
}

/**
 * Determines if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
public Mono<Boolean> exists() {
    try {
        return existsWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Determines if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.existsWithResponse}
 *
 * @return true if the blob exists, false if it doesn't
 */
public Mono<Response<Boolean>> existsWithResponse() {
    try {
        return withContext(this::existsWithResponse);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Implemented as a properties fetch: success => exists; a 404 storage error => false.
// Other errors propagate unchanged.
Mono<Response<Boolean>> existsWithResponse(Context context) {
    return this.getPropertiesWithResponse(null, context)
        .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true))
        .onErrorResume(t -> t instanceof BlobStorageException && ((BlobStorageException) t).getStatusCode() == 404,
            t -> {
                HttpResponse response = ((BlobStorageException) t).getResponse();
                return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                    response.getHeaders(), false));
            });
}

/**
 * Copies the data at the source URL to a blob.
*
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopy
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link PollerFlux} that polls the blob copy operation until it has completed, has failed, or has been
 * cancelled.
 */
public PollerFlux<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Convenience overload: no metadata, tier, priority, or access conditions.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}

/**
 * Copies the data at the source URL to a blob.
 *
 * <p><strong>Starting a copy operation</strong></p>
 * Starting a copy operation and polling on the responses.
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopy
 *
 * <p><strong>Cancelling a copy operation</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopyFromUrlCancel
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedAccessConditions {@link RequestConditions} against the source. Standard HTTP
 * Access conditions related to the modification of data. ETag and LastModifiedTime are used to construct
 * conditions related to when the blob was changed relative to the given request. The request will fail if the
 * specified condition is not satisfied.
 * @param destAccessConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status.
If none is specified, a default of one second
 * is used.
 * @return A {@link PollerFlux} that polls the blob copy operation until it has completed, has failed, or has been
 * cancelled.
 */
public PollerFlux<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedAccessConditions,
    BlobRequestConditions destAccessConditions, Duration pollInterval) {
    // Normalize optional arguments before wiring up the poller.
    final Duration interval = pollInterval != null ? pollInterval : Duration.ofSeconds(1);
    final RequestConditions sourceModifiedCondition = sourceModifiedAccessConditions == null
        ? new RequestConditions()
        : sourceModifiedAccessConditions;
    final BlobRequestConditions destinationAccessConditions = destAccessConditions == null
        ? new BlobRequestConditions()
        : destAccessConditions;
    // Defensive copy of the source conditions so later caller mutation cannot affect polling.
    final RequestConditions sourceConditions = new RequestConditions()
        .setIfModifiedSince(sourceModifiedCondition.getIfModifiedSince())
        .setIfUnmodifiedSince(sourceModifiedCondition.getIfUnmodifiedSince())
        .setIfMatch(sourceModifiedCondition.getIfMatch())
        .setIfNoneMatch(sourceModifiedCondition.getIfNoneMatch());
    return new PollerFlux<>(interval,
        (pollingContext) -> {
            try {
                // Activation: issue the Start Copy From URL request.
                return onStart(sourceUrl, metadata, tier, priority, sourceConditions,
                    destinationAccessConditions);
            } catch (RuntimeException ex) {
                return monoError(logger, ex);
            }
        },
        (pollingContext) -> {
            try {
                // Poll: inspect the blob's copy status via getProperties.
                return onPoll(pollingContext.getLatestResponse());
            } catch (RuntimeException ex) {
                return monoError(logger, ex);
            }
        },
        (pollingContext, firstResponse) -> {
            // Cancellation: abort the in-flight copy using the id captured at activation.
            if (firstResponse == null || firstResponse.getValue() == null) {
                return Mono.error(logger.logExceptionAsError(
                    new IllegalArgumentException("Cannot cancel a poll response that never started.")));
            }
            final String copyIdentifier = firstResponse.getValue().getCopyId();
            if (!ImplUtils.isNullOrEmpty(copyIdentifier)) {
                logger.info("Cancelling copy operation for copy id: {}", copyIdentifier);
                return
abortCopyFromUrl(copyIdentifier).thenReturn(firstResponse.getValue());
            }
            return Mono.empty();
        },
        // Fetch-result operation: this poller exposes no final result payload.
        (pollingContext) -> Mono.empty());
}

// Activation step for beginCopy: validates the source URL and issues Start Copy From URL,
// mapping the response headers into the initial BlobCopyInfo.
private Mono<BlobCopyInfo> onStart(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedAccessConditions,
    BlobRequestConditions destinationAccessConditions) {
    URL url;
    try {
        url = new URL(sourceUrl);
    } catch (MalformedURLException ex) {
        throw logger.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url.", ex));
    }
    return withContext(
        context -> azureBlobStorage.blobs().startCopyFromURLWithRestResponseAsync(null, null, url, null, metadata,
            tier, priority, sourceModifiedAccessConditions.getIfModifiedSince(),
            sourceModifiedAccessConditions.getIfUnmodifiedSince(), sourceModifiedAccessConditions.getIfMatch(),
            sourceModifiedAccessConditions.getIfNoneMatch(), destinationAccessConditions.getIfModifiedSince(),
            destinationAccessConditions.getIfUnmodifiedSince(), destinationAccessConditions.getIfMatch(),
            destinationAccessConditions.getIfNoneMatch(), destinationAccessConditions.getLeaseId(), null, context))
        .map(response -> {
            final BlobStartCopyFromURLHeaders headers = response.getDeserializedHeaders();
            return new BlobCopyInfo(sourceUrl, headers.getCopyId(), headers.getCopyStatus(),
                headers.getETag(), headers.getLastModified(), headers.getErrorCode());
        });
}

// Poll step for beginCopy: short-circuits on terminal states, otherwise re-reads the blob's
// copy status from its properties.
private Mono<PollResponse<BlobCopyInfo>> onPoll(PollResponse<BlobCopyInfo> pollResponse) {
    if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
        || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) {
        return Mono.just(pollResponse);
    }
    final BlobCopyInfo lastInfo = pollResponse.getValue();
    if (lastInfo == null) {
        logger.warning("BlobCopyInfo does not exist.
Activation operation failed.");
        return Mono.just(new PollResponse<>(
            LongRunningOperationStatus.fromString("COPY_START_FAILED", true), null));
    }
    return getProperties().map(response -> {
        final CopyStatusType status = response.getCopyStatus();
        final BlobCopyInfo result = new BlobCopyInfo(response.getCopySource(), response.getCopyId(), status,
            response.getETag(), response.getCopyCompletionTime(), response.getCopyStatusDescription());
        // Translate the service's copy status into the long-running-operation state machine.
        LongRunningOperationStatus operationStatus;
        switch (status) {
            case SUCCESS:
                operationStatus = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case FAILED:
                operationStatus = LongRunningOperationStatus.FAILED;
                break;
            case ABORTED:
                operationStatus = LongRunningOperationStatus.USER_CANCELLED;
                break;
            case PENDING:
                operationStatus = LongRunningOperationStatus.IN_PROGRESS;
                break;
            default:
                throw logger.logExceptionAsError(new IllegalArgumentException(
                    "CopyStatusType is not supported. Status: " + status));
        }
        return new PollResponse<>(operationStatus, result);
    }).onErrorReturn(
        // A failed poll keeps the last known copy info rather than surfacing the error.
        new PollResponse<>(LongRunningOperationStatus.fromString("POLLING_FAILED", true), lastInfo));
}

/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.abortCopyFromUrl
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @see
 * @see
 * @see
 * @param copyId The id of the copy operation to abort.
 * @return A reactive response signalling completion.
 */
public Mono<Void> abortCopyFromUrl(String copyId) {
    try {
        // Delegate to the response-returning overload with no lease id.
        return abortCopyFromUrlWithResponse(copyId, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.abortCopyFromUrlWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @see
 * @see
 * @see
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @return A reactive response signalling completion.
 */
public Mono<Response<Void>> abortCopyFromUrlWithResponse(String copyId, String leaseId) {
    try {
        return withContext(context -> abortCopyFromUrlWithResponse(copyId, leaseId, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Context-aware implementation of abort copy.
Mono<Response<Void>> abortCopyFromUrlWithResponse(String copyId, String leaseId, Context context) {
    return this.azureBlobStorage.blobs().abortCopyFromURLWithRestResponseAsync(
        null, null, copyId, null, leaseId, null, context)
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.copyFromUrl
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copySource The source URL to copy from.
 * @return A reactive response containing the copy ID for the long running operation.
 */
public Mono<String> copyFromUrl(String copySource) {
    try {
        // Delegate with default metadata, tier, and conditions.
        return copyFromUrlWithResponse(copySource, null, null, null, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.copyFromUrlWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copySource The source URL to copy from.
URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedAccessConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destAccessConditions {@link BlobRequestConditions} against the destination.
 * @return A reactive response containing the copy ID for the long running operation.
 */
public Mono<Response<String>> copyFromUrlWithResponse(String copySource, Map<String, String> metadata,
    AccessTier tier, RequestConditions sourceModifiedAccessConditions,
    BlobRequestConditions destAccessConditions) {
    try {
        return withContext(context -> copyFromUrlWithResponse(copySource, metadata, tier,
            sourceModifiedAccessConditions, destAccessConditions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Context-aware implementation; normalizes null condition objects before the REST call.
Mono<Response<String>> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedAccessConditions, BlobRequestConditions destAccessConditions,
    Context context) {
    sourceModifiedAccessConditions = sourceModifiedAccessConditions == null
        ? new RequestConditions() : sourceModifiedAccessConditions;
    destAccessConditions = destAccessConditions == null ?
new BlobRequestConditions() : destAccessConditions; URL url; try { url = new URL(copySource); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalArgumentException("'copySource' is not a valid url.")); } return this.azureBlobStorage.blobs().copyFromURLWithRestResponseAsync( null, null, url, null, metadata, tier, sourceModifiedAccessConditions.getIfModifiedSince(), sourceModifiedAccessConditions.getIfUnmodifiedSince(), sourceModifiedAccessConditions.getIfMatch(), sourceModifiedAccessConditions.getIfNoneMatch(), destAccessConditions.getIfModifiedSince(), destAccessConditions.getIfUnmodifiedSince(), destAccessConditions.getIfMatch(), destAccessConditions.getIfNoneMatch(), destAccessConditions.getLeaseId(), null, context) .map(rb -> new SimpleResponse<>(rb, rb.getDeserializedHeaders().getCopyId())); } /** * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or * {@link AppendBlobClient}. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.download} * * <p>For more information, see the * <a href="https: * * @return A reactive response containing the blob data. */ public Flux<ByteBuffer> download() { try { return downloadWithResponse(null, null, null, false) .flatMapMany(BlobDownloadAsyncResponse::getValue); } catch (RuntimeException ex) { return fluxError(logger, ex); } } /** * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link * PageBlobClient}, or {@link AppendBlobClient}. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadWithResponse * * <p>For more information, see the * <a href="https: * * @param range {@link BlobRange} * @param options {@link DownloadRetryOptions} * @param accessConditions {@link BlobRequestConditions} * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned. * @return A reactive response containing the blob data. */ public Mono<BlobDownloadAsyncResponse> downloadWithResponse(BlobRange range, DownloadRetryOptions options, BlobRequestConditions accessConditions, boolean rangeGetContentMD5) { try { return withContext(context -> downloadWithResponse(range, options, accessConditions, rangeGetContentMD5, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<BlobDownloadAsyncResponse> downloadWithResponse(BlobRange range, DownloadRetryOptions options, BlobRequestConditions accessConditions, boolean rangeGetContentMD5, Context context) { return downloadHelper(range, options, accessConditions, rangeGetContentMD5, context) .map(response -> new BlobDownloadAsyncResponse(response.getRequest(), response.getStatusCode(), response.getHeaders(), response.getValue(), response.getDeserializedHeaders())); } private Mono<ReliableDownload> downloadHelper(BlobRange range, DownloadRetryOptions options, BlobRequestConditions accessConditions, boolean rangeGetContentMd5, Context context) { range = range == null ? new BlobRange(0) : range; Boolean getMD5 = rangeGetContentMd5 ? rangeGetContentMd5 : null; accessConditions = accessConditions == null ? 
new BlobRequestConditions() : accessConditions; HttpGetterInfo info = new HttpGetterInfo() .setOffset(range.getOffset()) .setCount(range.getCount()) .setETag(accessConditions.getIfMatch()); return azureBlobStorage.blobs().downloadWithRestResponseAsync(null, null, snapshot, null, range.toHeaderValue(), accessConditions.getLeaseId(), getMD5, null, accessConditions.getIfModifiedSince(), accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(), null, customerProvidedKey, context) .map(response -> { info.setETag(response.getDeserializedHeaders().getETag()); return new ReliableDownload(response, options, info, updatedInfo -> downloadHelper(new BlobRange(updatedInfo.getOffset(), updatedInfo.getCount()), options, new BlobRequestConditions().setIfMatch(info.getETag()), false, context)); }); } /** * Downloads the entire blob into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p>Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link * AppendBlobClient}.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadToFile * * <p>For more information, see the * <a href="https: * * @param filePath A non-null {@link OutputStream} instance where the downloaded data will be written. * @return A reactive response containing the blob properties and metadata. */ public Mono<BlobProperties> downloadToFile(String filePath) { try { return downloadToFileWithResponse(filePath, null, null, null, null, false).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Downloads the entire blob into a file specified by the path. 
* * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p>Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link * AppendBlobClient}.</p> * * <p>This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra * call, provide the {@link BlobRange} parameter.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param filePath A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range {@link BlobRange} * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel * transfers parameter is ignored. * @param options {@link DownloadRetryOptions} * @param requestConditions {@link BlobRequestConditions} * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned. * @return A reactive response containing the blob properties and metadata. * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB. * @throws UncheckedIOException If an I/O error occurs. 
*/ public Mono<Response<BlobProperties>> downloadToFileWithResponse(String filePath, BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean rangeGetContentMd5) { try { return withContext(context -> downloadToFileWithResponse(filePath, range, parallelTransferOptions, options, requestConditions, rangeGetContentMd5, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<BlobProperties>> downloadToFileWithResponse(String filePath, BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) { BlobRange finalRange = range == null ? new BlobRange(0) : range; final ParallelTransferOptions finalParallelTransferOptions = ModelHelper.populateAndApplyDefaults(parallelTransferOptions); BlobRequestConditions finalConditions = requestConditions == null ? 
new BlobRequestConditions() : requestConditions; AsynchronousFileChannel channel = downloadToFileResourceSupplier(filePath); return Mono.just(channel) .flatMap(c -> this.downloadToFileImpl(c, finalRange, finalParallelTransferOptions, downloadRetryOptions, finalConditions, rangeGetContentMd5, context)) .doFinally(signalType -> this.downloadToFileCleanup(channel, filePath, signalType)); } private AsynchronousFileChannel downloadToFileResourceSupplier(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Mono<Response<BlobProperties>> downloadToFileImpl(AsynchronousFileChannel file, BlobRange finalRange, ParallelTransferOptions finalParallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) { Lock progressLock = new ReentrantLock(); AtomicLong totalProgress = new AtomicLong(0); /* * Downloads the first chunk and gets the size of the data and etag if not specified by the user. */ return getSetupMono(finalRange, finalParallelTransferOptions, downloadRetryOptions, requestConditions, rangeGetContentMd5, context) .flatMap(setupTuple3 -> { long newCount = setupTuple3.getT1(); BlobRequestConditions finalConditions = setupTuple3.getT2(); int numChunks = calculateNumBlocks(newCount, finalParallelTransferOptions.getBlockSize()); numChunks = numChunks == 0 ? 
1 : numChunks; BlobDownloadAsyncResponse initialResponse = setupTuple3.getT3(); return Flux.range(0, numChunks) .flatMap(chunkNum -> { if (chunkNum == 0) { return writeBodyToFile(initialResponse, file, 0, finalParallelTransferOptions, progressLock, totalProgress); } long chunkSizeActual = Math.min(finalParallelTransferOptions.getBlockSize(), newCount - (chunkNum.longValue() * finalParallelTransferOptions.getBlockSize().longValue())); BlobRange chunkRange = new BlobRange( finalRange.getOffset() + (chunkNum.longValue() * finalParallelTransferOptions.getBlockSize().longValue()), chunkSizeActual); return this.downloadWithResponse(chunkRange, downloadRetryOptions, finalConditions, rangeGetContentMd5, null) .subscribeOn(Schedulers.elastic()) .flatMap(response -> writeBodyToFile(response, file, chunkNum, finalParallelTransferOptions, progressLock, totalProgress)); }) .then(Mono.just(buildBlobPropertiesResponse(initialResponse))); }); } private int calculateNumBlocks(long dataSize, long blockLength) { int numBlocks = toIntExact(dataSize / blockLength); if (dataSize % blockLength != 0) { numBlocks++; } return numBlocks; } /* Download the first chunk. Construct a Mono which will emit the total count for calculating the number of chunks, access conditions containing the etag to lock on, and the response from downloading the first chunk. */ private Mono<Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse>> getSetupMono(BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) { long initialChunkSize = range.getCount() != null && range.getCount() < parallelTransferOptions.getBlockSize() ? 
range.getCount() : parallelTransferOptions.getBlockSize(); return this.downloadWithResponse(new BlobRange(range.getOffset(), initialChunkSize), downloadRetryOptions, requestConditions, rangeGetContentMd5, context) .subscribeOn(Schedulers.elastic()) .flatMap(response -> { /* Either the etag was set and it matches because the download succeeded, so this is a no-op, or there was no etag, so we set it here. ETag locking is vital to ensure we download one, consistent view of the file. */ BlobRequestConditions newConditions = setEtag(requestConditions, response.getDeserializedHeaders().getETag()); long totalLength = extractTotalBlobLength(response.getDeserializedHeaders().getContentRange()); /* If the user either didn't specify a count or they specified a count greater than the size of the remaining data, take the size of the remaining data. This is to prevent the case where the count is much much larger than the size of the blob and we could try to download at an invalid offset. */ long newCount = range.getCount() == null || range.getCount() > (totalLength - range.getOffset()) ? totalLength - range.getOffset() : range.getCount(); return Mono.zip(Mono.just(newCount), Mono.just(newConditions), Mono.just(response)); }) .onErrorResume(BlobStorageException.class, blobStorageException -> { /* In the case of an empty blob, we still want to report success and give back valid headers. Attempting a range download on an empty blob will return an InvalidRange error code and a Content-Range header of the format "bytes * /0". We need to double check that the total size is zero in the case that the customer has attempted an invalid range on a non-zero length blob. 
*/ if (blobStorageException.getErrorCode() == BlobErrorCode.INVALID_RANGE && extractTotalBlobLength(blobStorageException.getResponse() .getHeaders().getValue("Content-Range")) == 0) { return this.downloadWithResponse(new BlobRange(0, 0L), downloadRetryOptions, requestConditions, rangeGetContentMd5, context) .subscribeOn(Schedulers.elastic()) .flatMap(response -> { /* Ensure the blob is still 0 length by checking our download was the full length. (200 is for full blob; 206 is partial). */ if (response.getStatusCode() != 200) { Mono.error(new IllegalStateException("Blob was modified mid download. It was " + "originally 0 bytes and is now larger.")); } return Mono.zip(Mono.just(0L), Mono.just(requestConditions), Mono.just(response)); }); } return Mono.error(blobStorageException); }); } private static BlobRequestConditions setEtag(BlobRequestConditions accessConditions, String etag) { return new BlobRequestConditions() .setIfModifiedSince( accessConditions.getIfModifiedSince()) .setIfUnmodifiedSince( accessConditions.getIfModifiedSince()) .setIfMatch(etag) .setIfNoneMatch( accessConditions.getIfNoneMatch()) .setLeaseId(accessConditions.getLeaseId()); } private static Mono<Void> writeBodyToFile(BlobDownloadAsyncResponse response, AsynchronousFileChannel file, long chunkNum, ParallelTransferOptions finalParallelTransferOptions, Lock progressLock, AtomicLong totalProgress) { Flux<ByteBuffer> data = response.getValue(); data = ProgressReporter.addParallelProgressReporting(data, finalParallelTransferOptions.getProgressReceiver(), progressLock, totalProgress); return FluxUtil.writeFile(data, file, chunkNum * finalParallelTransferOptions.getBlockSize()); } private static Response<BlobProperties> buildBlobPropertiesResponse(BlobDownloadAsyncResponse response) { BlobProperties properties = new BlobProperties(null, response.getDeserializedHeaders().getLastModified(), response.getDeserializedHeaders().getETag(), response.getDeserializedHeaders().getContentLength() == null ? 
0 : response.getDeserializedHeaders().getContentLength(), response.getDeserializedHeaders().getContentType(), null, response.getDeserializedHeaders().getContentEncoding(), response.getDeserializedHeaders().getContentDisposition(), response.getDeserializedHeaders().getContentLanguage(), response.getDeserializedHeaders().getCacheControl(), response.getDeserializedHeaders().getBlobSequenceNumber(), response.getDeserializedHeaders().getBlobType(), response.getDeserializedHeaders().getLeaseStatus(), response.getDeserializedHeaders().getLeaseState(), response.getDeserializedHeaders().getLeaseDuration(), response.getDeserializedHeaders().getCopyId(), response.getDeserializedHeaders().getCopyStatus(), response.getDeserializedHeaders().getCopySource(), response.getDeserializedHeaders().getCopyProgress(), response.getDeserializedHeaders().getCopyCompletionTime(), response.getDeserializedHeaders().getCopyStatusDescription(), response.getDeserializedHeaders().isServerEncrypted(), null, null, null, null, null, response.getDeserializedHeaders().getEncryptionKeySha256(), null, response.getDeserializedHeaders().getMetadata(), response.getDeserializedHeaders().getBlobCommittedBlockCount()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), properties); } private static long extractTotalBlobLength(String contentRange) { return Long.parseLong(contentRange.split("/")[1]); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.delete} * * <p>For more information, see the * <a href="https: * * @return A reactive response signalling completion. */ public Mono<Void> delete() { try { return deleteWithResponse(null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes the specified blob or snapshot. 
Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.deleteWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param accessConditions {@link BlobRequestConditions}
 * @return A reactive response signalling completion.
 */
public Mono<Response<Void>> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions accessConditions) {
    try {
        // Defer to the Context-accepting overload through withContext, matching the pattern used by
        // every other public *WithResponse method in this class.
        return withContext(context -> deleteWithResponse(deleteBlobSnapshotOptions, accessConditions, context));
    } catch (RuntimeException ex) {
        // Route synchronous failures into the returned Mono instead of throwing to the caller.
        return monoError(logger, ex);
    }
}

// Internal overload that performs the actual Delete Blob service call with the supplied Context.
Mono<Response<Void>> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions accessConditions, Context context) {
    // Default to an empty condition set so the call below can read each field without null checks.
    accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions;

    return this.azureBlobStorage.blobs().deleteWithRestResponseAsync(null, null, snapshot, null,
        accessConditions.getLeaseId(), deleteBlobSnapshotOptions, accessConditions.getIfModifiedSince(),
        accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(),
        null, context)
        // Delete returns no body, so strip the payload down to a Void response.
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getProperties}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return A reactive response containing the blob properties and metadata.
*/ public Mono<BlobProperties> getProperties() { try { return getPropertiesWithResponse(null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Returns the blob's metadata and properties. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param accessConditions {@link BlobRequestConditions} * @return A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobRequestConditions accessConditions) { try { return withContext(context -> getPropertiesWithResponse(accessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobRequestConditions accessConditions, Context context) { accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions; return this.azureBlobStorage.blobs().getPropertiesWithRestResponseAsync( null, null, snapshot, null, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(), accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(), null, customerProvidedKey, context) .map(rb -> { BlobGetPropertiesHeaders hd = rb.getDeserializedHeaders(); BlobProperties properties = new BlobProperties(hd.getCreationTime(), hd.getLastModified(), hd.getETag(), hd.getContentLength() == null ? 
0 : hd.getContentLength(), hd.getContentType(), hd.getContentMD5(), hd.getContentEncoding(), hd.getContentDisposition(), hd.getContentLanguage(), hd.getCacheControl(), hd.getBlobSequenceNumber(), hd.getBlobType(), hd.getLeaseStatus(), hd.getLeaseState(), hd.getLeaseDuration(), hd.getCopyId(), hd.getCopyStatus(), hd.getCopySource(), hd.getCopyProgress(), hd.getCopyCompletionTime(), hd.getCopyStatusDescription(), hd.isServerEncrypted(), hd.isIncrementalCopy(), hd.getDestinationSnapshot(), AccessTier.fromString(hd.getAccessTier()), hd.isAccessTierInferred(), ArchiveStatus.fromString(hd.getArchiveStatus()), hd.getEncryptionKeySha256(), hd.getAccessTierChangeTime(), hd.getMetadata(), hd.getBlobCommittedBlockCount()); return new SimpleResponse<>(rb, properties); }); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In * order to preserve existing values, they must be passed alongside the header being changed. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param headers {@link BlobHttpHeaders} * @return A reactive response signalling completion. */ public Mono<Void> setHttpHeaders(BlobHttpHeaders headers) { try { return setHttpHeadersWithResponse(headers, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In * order to preserve existing values, they must be passed alongside the header being changed. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setHttpHeadersWithResponse * * <p>For more information, see the * <a href="https: * * @param headers {@link BlobHttpHeaders} * @param accessConditions {@link BlobRequestConditions} * @return A reactive response signalling completion. */ public Mono<Response<Void>> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions accessConditions) { try { return withContext(context -> setHttpHeadersWithResponse(headers, accessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions accessConditions, Context context) { accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions; return this.azureBlobStorage.blobs().setHTTPHeadersWithRestResponseAsync( null, null, null, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(), accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(), null, headers, context) .map(response -> new SimpleResponse<>(response, null)); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values * must be preserved, they must be downloaded and included in the call to this method. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob. * @return A reactive response signalling completion. */ public Mono<Void> setMetadata(Map<String, String> metadata) { try { return setMetadataWithResponse(metadata, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Changes a blob's metadata. 
The specified metadata in this method will replace existing metadata. If old values * must be preserved, they must be downloaded and included in the call to this method. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob. * @param accessConditions {@link BlobRequestConditions} * @return A reactive response signalling completion. */ public Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions accessConditions) { try { return withContext(context -> setMetadataWithResponse(metadata, accessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions accessConditions, Context context) { accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions; return this.azureBlobStorage.blobs().setMetadataWithRestResponseAsync( null, null, null, metadata, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(), accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(), null, customerProvidedKey, context) .map(response -> new SimpleResponse<>(response, null)); } /** * Creates a read-only snapshot of the blob. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.createSnapshot} * * <p>For more information, see the * <a href="https: * * @return A response containing a {@link BlobAsyncClientBase} which is used to interact with the created snapshot, * use {@link */ public Mono<BlobAsyncClientBase> createSnapshot() { try { return createSnapshotWithResponse(null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a read-only snapshot of the blob. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.createSnapshotWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob snapshot. * @param accessConditions {@link BlobRequestConditions} * @return A response containing a {@link BlobAsyncClientBase} which is used to interact with the created snapshot, * use {@link */ public Mono<Response<BlobAsyncClientBase>> createSnapshotWithResponse(Map<String, String> metadata, BlobRequestConditions accessConditions) { try { return withContext(context -> createSnapshotWithResponse(metadata, accessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<BlobAsyncClientBase>> createSnapshotWithResponse(Map<String, String> metadata, BlobRequestConditions accessConditions, Context context) { accessConditions = accessConditions == null ? 
new BlobRequestConditions() : accessConditions; return this.azureBlobStorage.blobs().createSnapshotWithRestResponseAsync( null, null, null, metadata, accessConditions.getIfModifiedSince(), accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(), accessConditions.getLeaseId(), null, customerProvidedKey, context) .map(rb -> new SimpleResponse<>(rb, this.getSnapshotClient(rb.getDeserializedHeaders().getSnapshot()))); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setAccessTier * * <p>For more information, see the * <a href="https: * * @param tier The new tier for the blob. * @return A reactive response signalling completion. * @throws NullPointerException if {@code tier} is null. */ public Mono<Void> setAccessTier(AccessTier tier) { try { return setAccessTierWithResponse(tier, null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setAccessTierWithResponse * * <p>For more information, see the * <a href="https: * * @param tier The new tier for the blob. 
* @param priority Optional priority to set for re-hydrating blobs. * @param leaseId The lease ID the active lease on the blob must match. * @return A reactive response signalling completion. * @throws NullPointerException if {@code tier} is null. */ public Mono<Response<Void>> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId) { try { return withContext(context -> setTierWithResponse(tier, priority, leaseId, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> setTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId, Context context) { StorageImplUtils.assertNotNull("tier", tier); return this.azureBlobStorage.blobs().setTierWithRestResponseAsync( null, null, tier, null, priority, null, leaseId, context) .map(response -> new SimpleResponse<>(response, null)); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.undelete} * * <p>For more information, see the * <a href="https: * * @return A reactive response signalling completion. */ public Mono<Void> undelete() { try { return undeleteWithResponse().flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.undeleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A reactive response signalling completion. 
*/ public Mono<Response<Void>> undeleteWithResponse() { try { return withContext(this::undeleteWithResponse); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> undeleteWithResponse(Context context) { return this.azureBlobStorage.blobs().undeleteWithRestResponseAsync(null, null, context).map(response -> new SimpleResponse<>(response, null)); } /** * Returns the sku name and account kind for the account. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getAccountInfo} * * <p>For more information, see the * <a href="https: * * @return a reactor response containing the sku name and account kind. */ public Mono<StorageAccountInfo> getAccountInfo() { try { return getAccountInfoWithResponse().flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Returns the sku name and account kind for the account. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getAccountInfoWithResponse} * * <p>For more information, see the * <a href="https: * * @return a reactor response containing the sku name and account kind. */ public Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse() { try { return withContext(this::getAccountInfoWithResponse); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse(Context context) { return this.azureBlobStorage.blobs().getAccountInfoWithRestResponseAsync(null, null, context) .map(rb -> { BlobGetAccountInfoHeaders hd = rb.getDeserializedHeaders(); return new SimpleResponse<>(rb, new StorageAccountInfo(hd.getSkuName(), hd.getAccountKind())); }); } }
nit: This could be slightly less verbose if the method returned just `BlobProperties` and accepted only the headers as an argument. Then, wherever it is called, you could simply write `new SimpleResponse<>(response, buildBlobProperties(response.getDeserializedHeaders()));`.
/*
 * Builds a BlobProperties response from a raw download response. Only the fields carried on the download
 * headers are populated; fields the download response does not expose are passed as null.
 */
private static Response<BlobProperties> buildBlobPropertiesResponse(BlobDownloadAsyncResponse response) {
    // Hoist the deserialized headers into a local so each property read doesn't repeat the accessor chain.
    // The fully-qualified type avoids relying on an import that may not be present in this file.
    final com.azure.storage.blob.models.BlobDownloadHeaders hd = response.getDeserializedHeaders();
    BlobProperties properties = new BlobProperties(null, hd.getLastModified(), hd.getETag(),
        hd.getContentLength() == null ? 0 : hd.getContentLength(), hd.getContentType(), null,
        hd.getContentEncoding(), hd.getContentDisposition(), hd.getContentLanguage(), hd.getCacheControl(),
        hd.getBlobSequenceNumber(), hd.getBlobType(), hd.getLeaseStatus(), hd.getLeaseState(),
        hd.getLeaseDuration(), hd.getCopyId(), hd.getCopyStatus(), hd.getCopySource(), hd.getCopyProgress(),
        hd.getCopyCompletionTime(), hd.getCopyStatusDescription(), hd.isServerEncrypted(), null, null, null,
        null, null, hd.getEncryptionKeySha256(), null, hd.getMetadata(), hd.getBlobCommittedBlockCount());
    return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
        properties);
}
BlobProperties properties = new BlobProperties(null, response.getDeserializedHeaders().getLastModified(),
/*
 * Builds a BlobProperties response from a raw download response. Only the fields carried on the download
 * headers are populated; fields the download response does not expose are passed as null.
 */
private static Response<BlobProperties> buildBlobPropertiesResponse(BlobDownloadAsyncResponse response) {
    // Hoist the deserialized headers into a local so each property read doesn't repeat the accessor chain.
    // The fully-qualified type avoids relying on an import that may not be present in this file.
    final com.azure.storage.blob.models.BlobDownloadHeaders hd = response.getDeserializedHeaders();
    BlobProperties properties = new BlobProperties(null, hd.getLastModified(), hd.getETag(),
        hd.getContentLength() == null ? 0 : hd.getContentLength(), hd.getContentType(), null,
        hd.getContentEncoding(), hd.getContentDisposition(), hd.getContentLanguage(), hd.getCacheControl(),
        hd.getBlobSequenceNumber(), hd.getBlobType(), hd.getLeaseStatus(), hd.getLeaseState(),
        hd.getLeaseDuration(), hd.getCopyId(), hd.getCopyStatus(), hd.getCopySource(), hd.getCopyProgress(),
        hd.getCopyCompletionTime(), hd.getCopyStatusDescription(), hd.isServerEncrypted(), null, null, null,
        null, null, hd.getEncryptionKeySha256(), null, hd.getMetadata(), hd.getBlobCommittedBlockCount());
    return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
        properties);
}
class BlobAsyncClientBase {
    // Default chunk size (4 MB) used when splitting a parallel download into blocks.
    private static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB;
    // Upper bound (100 MB) on a single download block.
    private static final int BLOB_MAX_DOWNLOAD_BLOCK_SIZE = 100 * Constants.MB;

    private final ClientLogger logger = new ClientLogger(BlobAsyncClientBase.class);

    // Generated protocol layer that performs the actual REST calls.
    protected final AzureBlobStorageImpl azureBlobStorage;
    // Snapshot id this client is bound to; null when the client targets the base blob.
    private final String snapshot;
    // Customer-provided encryption key; null lets the service use its own encryption.
    private final CpkInfo customerProvidedKey;
    protected final String accountName;
    protected final String containerName;
    protected final String blobName;
    protected final BlobServiceVersion serviceVersion;

    /**
     * Package-private constructor for use by {@link SpecializedBlobClientBuilder}.
     *
     * @param pipeline The pipeline used to send and receive service requests.
     * @param url The endpoint where to send service requests.
     * @param serviceVersion The version of the service to receive requests.
     * @param accountName The storage account name.
     * @param containerName The container name.
     * @param blobName The blob name.
     * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
     * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     */
    protected BlobAsyncClientBase(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion,
        String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey) {
        this.azureBlobStorage = new AzureBlobStorageBuilder()
            .pipeline(pipeline)
            .url(url)
            .version(serviceVersion.getVersion())
            .build();
        this.serviceVersion = serviceVersion;
        this.accountName = accountName;
        this.containerName = containerName;
        // Decode-then-encode normalizes the blob name to a consistently URL-encoded form,
        // whether or not the caller passed it already encoded.
        this.blobName = Utility.urlEncode(Utility.urlDecode(blobName));
        this.snapshot = snapshot;
        this.customerProvidedKey = customerProvidedKey;
    }

    /**
     * Creates a new {@link BlobAsyncClientBase} linked to the {@code snapshot} of this blob resource.
* * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobAsyncClientBase} used to interact with the specific snapshot. */ public BlobAsyncClientBase getSnapshotClient(String snapshot) { return new BlobAsyncClientBase(getHttpPipeline(), getBlobUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), snapshot, getCustomerProvidedKey()); } /** * Gets the URL of the blob represented by this client. * * @return the URL. */ public String getBlobUrl() { if (!this.isSnapshot()) { return azureBlobStorage.getUrl(); } else { if (azureBlobStorage.getUrl().contains("?")) { return String.format("%s&snapshot=%s", azureBlobStorage.getUrl(), snapshot); } else { return String.format("%s?snapshot=%s", azureBlobStorage.getUrl(), snapshot); } } } /** * Get the container name. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getContainerName} * * @return The name of the container. */ public final String getContainerName() { return containerName; } /** * Decodes and gets the blob name. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getBlobName} * * @return The decoded name of the blob. */ public final String getBlobName() { return (blobName == null) ? null : Utility.urlDecode(blobName); } /** * Gets the {@link HttpPipeline} powering this client. * * @return The pipeline. */ public HttpPipeline getHttpPipeline() { return azureBlobStorage.getHttpPipeline(); } /** * Gets the {@link CpkInfo} used to encrypt this blob's content on the server. * * @return the customer provided key used for encryption. */ public CpkInfo getCustomerProvidedKey() { return customerProvidedKey; } /** * Get associated account name. * * @return account name associated with this storage resource. */ public String getAccountName() { return accountName; } /** * Gets the service version the client is using. 
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
    return serviceVersion;
}

/**
 * Gets the snapshotId for a blob resource
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    return this.snapshot;
}

/**
 * Determines if a blob is a snapshot
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    return this.snapshot != null;
}

/**
 * Determines if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
public Mono<Boolean> exists() {
    try {
        return existsWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Determines if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.existsWithResponse}
 *
 * @return true if the blob exists, false if it doesn't
 */
public Mono<Response<Boolean>> existsWithResponse() {
    try {
        return withContext(this::existsWithResponse);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<Boolean>> existsWithResponse(Context context) {
    // A successful properties call proves existence. A 404 from the service is translated into a
    // successful response carrying 'false' instead of propagating as an error; any other failure
    // (auth, network, ...) still surfaces as an error.
    return this.getPropertiesWithResponse(null, context)
        .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true))
        .onErrorResume(t -> t instanceof BlobStorageException && ((BlobStorageException) t).getStatusCode() == 404,
            t -> {
                HttpResponse response = ((BlobStorageException) t).getResponse();
                return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                    response.getHeaders(), false));
            });
}

/**
 * Copies the data at the source URL to a blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link PollerFlux} that polls the blob copy operation until it has completed, has failed, or has been
* cancelled.
*/
public PollerFlux<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Delegates with defaults: no metadata, tier, rehydrate priority, or access conditions.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}

/**
 * Copies the data at the source URL to a blob.
 *
 * <p><strong>Starting a copy operation</strong></p>
 * Starting a copy operation and polling on the responses.
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopy
 *
 * <p><strong>Cancelling a copy operation</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopyFromUrlCancel
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedAccessConditions {@link RequestConditions} against the source. Standard HTTP
 * Access conditions related to the modification of data. ETag and LastModifiedTime are used to construct
 * conditions related to when the blob was changed relative to the given request. The request will fail if the
 * specified condition is not satisfied.
 * @param destAccessConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status.
If none is specified, a default of one second
 * is used.
 * @return A {@link PollerFlux} that polls the blob copy operation until it has completed, has failed, or has been
 * cancelled.
 */
public PollerFlux<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedAccessConditions,
    BlobRequestConditions destAccessConditions, Duration pollInterval) {
    // Default the poll interval to one second when the caller did not provide one.
    final Duration interval = pollInterval != null ? pollInterval : Duration.ofSeconds(1);
    final RequestConditions sourceModifiedCondition = sourceModifiedAccessConditions == null
        ? new RequestConditions() : sourceModifiedAccessConditions;
    final BlobRequestConditions destinationAccessConditions = destAccessConditions == null
        ? new BlobRequestConditions() : destAccessConditions;

    // Copy only the standard HTTP conditions onto the source conditions.
    final RequestConditions sourceConditions = new RequestConditions()
        .setIfModifiedSince(sourceModifiedCondition.getIfModifiedSince())
        .setIfUnmodifiedSince(sourceModifiedCondition.getIfUnmodifiedSince())
        .setIfMatch(sourceModifiedCondition.getIfMatch())
        .setIfNoneMatch(sourceModifiedCondition.getIfNoneMatch());

    return new PollerFlux<>(interval,
        // Activation: issue the Start Copy request.
        (pollingContext) -> {
            try {
                return onStart(sourceUrl, metadata, tier, priority, sourceConditions, destinationAccessConditions);
            } catch (RuntimeException ex) {
                return monoError(logger, ex);
            }
        },
        // Poll: inspect the latest copy status.
        (pollingContext) -> {
            try {
                return onPoll(pollingContext.getLatestResponse());
            } catch (RuntimeException ex) {
                return monoError(logger, ex);
            }
        },
        // Cancel: abort using the copy id captured at activation.
        (pollingContext, firstResponse) -> {
            if (firstResponse == null || firstResponse.getValue() == null) {
                return Mono.error(logger.logExceptionAsError(
                    new IllegalArgumentException("Cannot cancel a poll response that never started.")));
            }
            final String copyIdentifier = firstResponse.getValue().getCopyId();
            if (!ImplUtils.isNullOrEmpty(copyIdentifier)) {
                logger.info("Cancelling copy operation for copy id: {}", copyIdentifier);
                return
abortCopyFromUrl(copyIdentifier).thenReturn(firstResponse.getValue());
            }
            return Mono.empty();
        },
        // Fetch-result: the copy operation carries no final result payload.
        (pollingContext) -> Mono.empty());
}

// Activation operation: validates the source URL and issues the Start Copy From URL request,
// mapping the response headers into a BlobCopyInfo.
private Mono<BlobCopyInfo> onStart(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedAccessConditions,
    BlobRequestConditions destinationAccessConditions) {
    URL url;
    try {
        url = new URL(sourceUrl);
    } catch (MalformedURLException ex) {
        throw logger.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url.", ex));
    }

    return withContext(
        context -> azureBlobStorage.blobs().startCopyFromURLWithRestResponseAsync(null, null, url, null, metadata,
            tier, priority, sourceModifiedAccessConditions.getIfModifiedSince(),
            sourceModifiedAccessConditions.getIfUnmodifiedSince(), sourceModifiedAccessConditions.getIfMatch(),
            sourceModifiedAccessConditions.getIfNoneMatch(), destinationAccessConditions.getIfModifiedSince(),
            destinationAccessConditions.getIfUnmodifiedSince(), destinationAccessConditions.getIfMatch(),
            destinationAccessConditions.getIfNoneMatch(), destinationAccessConditions.getLeaseId(), null, context))
        .map(response -> {
            final BlobStartCopyFromURLHeaders headers = response.getDeserializedHeaders();
            return new BlobCopyInfo(sourceUrl, headers.getCopyId(), headers.getCopyStatus(), headers.getETag(),
                headers.getLastModified(), headers.getErrorCode());
        });
}

// Poll operation: terminal responses are returned as-is; otherwise the blob's properties are
// fetched and the service-side copy status is mapped to a LongRunningOperationStatus.
private Mono<PollResponse<BlobCopyInfo>> onPoll(PollResponse<BlobCopyInfo> pollResponse) {
    if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
        || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) {
        return Mono.just(pollResponse);
    }

    final BlobCopyInfo lastInfo = pollResponse.getValue();
    if (lastInfo == null) {
        logger.warning("BlobCopyInfo does not exist. 
Activation operation failed.");
        return Mono.just(new PollResponse<>(
            LongRunningOperationStatus.fromString("COPY_START_FAILED", true), null));
    }

    return getProperties().map(response -> {
        final CopyStatusType status = response.getCopyStatus();
        final BlobCopyInfo result = new BlobCopyInfo(response.getCopySource(), response.getCopyId(), status,
            response.getETag(), response.getCopyCompletionTime(), response.getCopyStatusDescription());

        // Map the service-side copy status onto the generic long-running-operation status.
        LongRunningOperationStatus operationStatus;
        switch (status) {
            case SUCCESS:
                operationStatus = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case FAILED:
                operationStatus = LongRunningOperationStatus.FAILED;
                break;
            case ABORTED:
                operationStatus = LongRunningOperationStatus.USER_CANCELLED;
                break;
            case PENDING:
                operationStatus = LongRunningOperationStatus.IN_PROGRESS;
                break;
            default:
                throw logger.logExceptionAsError(new IllegalArgumentException(
                    "CopyStatusType is not supported. Status: " + status));
        }

        return new PollResponse<>(operationStatus, result);
        // A failed properties fetch keeps polling alive by re-reporting the last known info.
    }).onErrorReturn(
        new PollResponse<>(LongRunningOperationStatus.fromString("POLLING_FAILED", true), lastInfo));
}

/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.abortCopyFromUrl
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @see
 * @see
 * @see
 * @param copyId The id of the copy operation to abort.
 * @return A reactive response signalling completion.
 */
public Mono<Void> abortCopyFromUrl(String copyId) {
    try {
        return abortCopyFromUrlWithResponse(copyId, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @see
* @see
* @see
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @return A reactive response signalling completion.
*/
public Mono<Response<Void>> abortCopyFromUrlWithResponse(String copyId, String leaseId) {
    try {
        return withContext(context -> abortCopyFromUrlWithResponse(copyId, leaseId, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<Void>> abortCopyFromUrlWithResponse(String copyId, String leaseId, Context context) {
    // Abort returns no body; only the raw response details are surfaced.
    return this.azureBlobStorage.blobs().abortCopyFromURLWithRestResponseAsync(
        null, null, copyId, null, leaseId, null, context)
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.copyFromUrl
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copySource The source URL to copy from.
 * @return A reactive response containing the copy ID for the long running operation.
 */
public Mono<String> copyFromUrl(String copySource) {
    try {
        return copyFromUrlWithResponse(copySource, null, null, null, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.copyFromUrlWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copySource The source URL to copy from.
URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedAccessConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destAccessConditions {@link BlobRequestConditions} against the destination.
 * @return A reactive response containing the copy ID for the long running operation.
 */
public Mono<Response<String>> copyFromUrlWithResponse(String copySource, Map<String, String> metadata,
    AccessTier tier, RequestConditions sourceModifiedAccessConditions,
    BlobRequestConditions destAccessConditions) {
    try {
        return withContext(context -> copyFromUrlWithResponse(copySource, metadata, tier,
            sourceModifiedAccessConditions, destAccessConditions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<String>> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedAccessConditions, BlobRequestConditions destAccessConditions,
    Context context) {
    // Substitute empty condition sets so the protocol-layer call can read them unconditionally.
    sourceModifiedAccessConditions = sourceModifiedAccessConditions == null
        ? new RequestConditions() : sourceModifiedAccessConditions;
    destAccessConditions = destAccessConditions == null ?
new BlobRequestConditions() : destAccessConditions; URL url; try { url = new URL(copySource); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalArgumentException("'copySource' is not a valid url.")); } return this.azureBlobStorage.blobs().copyFromURLWithRestResponseAsync( null, null, url, null, metadata, tier, sourceModifiedAccessConditions.getIfModifiedSince(), sourceModifiedAccessConditions.getIfUnmodifiedSince(), sourceModifiedAccessConditions.getIfMatch(), sourceModifiedAccessConditions.getIfNoneMatch(), destAccessConditions.getIfModifiedSince(), destAccessConditions.getIfUnmodifiedSince(), destAccessConditions.getIfMatch(), destAccessConditions.getIfNoneMatch(), destAccessConditions.getLeaseId(), null, context) .map(rb -> new SimpleResponse<>(rb, rb.getDeserializedHeaders().getCopyId())); } /** * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or * {@link AppendBlobClient}. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.download} * * <p>For more information, see the * <a href="https: * * @return A reactive response containing the blob data. */ public Flux<ByteBuffer> download() { try { return downloadWithResponse(null, null, null, false) .flatMapMany(BlobDownloadAsyncResponse::getValue); } catch (RuntimeException ex) { return fluxError(logger, ex); } } /** * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link * PageBlobClient}, or {@link AppendBlobClient}. 
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param accessConditions {@link BlobRequestConditions}
* @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned.
* @return A reactive response containing the blob data.
*/
public Mono<BlobDownloadAsyncResponse> downloadWithResponse(BlobRange range, DownloadRetryOptions options,
    BlobRequestConditions accessConditions, boolean rangeGetContentMD5) {
    try {
        return withContext(context -> downloadWithResponse(range, options, accessConditions, rangeGetContentMD5,
            context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<BlobDownloadAsyncResponse> downloadWithResponse(BlobRange range, DownloadRetryOptions options,
    BlobRequestConditions accessConditions, boolean rangeGetContentMD5, Context context) {
    // Wrap the reliable-download result into the public response type.
    return downloadHelper(range, options, accessConditions, rangeGetContentMD5, context)
        .map(response -> new BlobDownloadAsyncResponse(response.getRequest(), response.getStatusCode(),
            response.getHeaders(), response.getValue(), response.getDeserializedHeaders()));
}

// Issues the raw download and wraps it in a ReliableDownload that can resume after transient
// stream failures, re-requesting the remaining range locked to the captured ETag.
private Mono<ReliableDownload> downloadHelper(BlobRange range, DownloadRetryOptions options,
    BlobRequestConditions accessConditions, boolean rangeGetContentMd5, Context context) {
    range = range == null ? new BlobRange(0) : range;
    // Service expects true or absent (null) for the range-MD5 flag, never false.
    Boolean getMD5 = rangeGetContentMd5 ? rangeGetContentMd5 : null;
    accessConditions = accessConditions == null ?
new BlobRequestConditions() : accessConditions;
    HttpGetterInfo info = new HttpGetterInfo()
        .setOffset(range.getOffset())
        .setCount(range.getCount())
        .setETag(accessConditions.getIfMatch());

    return azureBlobStorage.blobs().downloadWithRestResponseAsync(null, null, snapshot, null,
        range.toHeaderValue(), accessConditions.getLeaseId(), getMD5, null,
        accessConditions.getIfModifiedSince(), accessConditions.getIfUnmodifiedSince(),
        accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(), null, customerProvidedKey, context)
        .map(response -> {
            // Lock subsequent retried reads to the ETag observed on the first response.
            info.setETag(response.getDeserializedHeaders().getETag());
            return new ReliableDownload(response, options, info, updatedInfo ->
                downloadHelper(new BlobRange(updatedInfo.getOffset(), updatedInfo.getCount()), options,
                    new BlobRequestConditions().setIfMatch(info.getETag()), false, context));
        });
}

/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p>Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link
 * AppendBlobClient}.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadToFile
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param filePath A non-null {@code String} path of the file where the downloaded data will be written.
 * @return A reactive response containing the blob properties and metadata.
 */
public Mono<BlobProperties> downloadToFile(String filePath) {
    try {
        return downloadToFileWithResponse(filePath, null, null, null, null, false).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p>Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link
* AppendBlobClient}.</p>
*
* <p>This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra
* call, provide the {@link BlobRange} parameter.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A non-null {@code String} path of the file where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @return A reactive response containing the blob properties and metadata.
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Mono<Response<BlobProperties>> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions options,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5) {
    try {
        return withContext(context -> downloadToFileWithResponse(filePath, range, parallelTransferOptions,
            options, requestConditions, rangeGetContentMd5, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<BlobProperties>> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) {
    BlobRange rangeReal = range == null ? new BlobRange(0) : range;
    // Fills in defaults (e.g. block size) and validates the caller-supplied transfer options.
    final ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.populateAndApplyDefaults(parallelTransferOptions);
    BlobRequestConditions conditionsReal = requestConditions == null ?
new BlobRequestConditions() : requestConditions;
    AsynchronousFileChannel channel = downloadToFileResourceSupplier(filePath);
    // doFinally guarantees the channel is closed (and a partial file deleted) on any outcome.
    return Mono.just(channel)
        .flatMap(c -> this.downloadToFileImpl(c, rangeReal, finalParallelTransferOptions,
            downloadRetryOptions, conditionsReal, rangeGetContentMd5, context))
        .doFinally(signalType -> this.downloadToFileCleanup(channel, filePath, signalType));
}

// Opens the destination file; CREATE_NEW enforces that the file must not already exist.
private AsynchronousFileChannel downloadToFileResourceSupplier(String filePath) {
    try {
        return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ,
            StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
    } catch (IOException e) {
        throw logger.logExceptionAsError(new UncheckedIOException(e));
    }
}

private Mono<Response<BlobProperties>> downloadToFileImpl(AsynchronousFileChannel file, BlobRange rangeReal,
    ParallelTransferOptions finalParallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) {
    // Shared across chunk writers so parallel progress updates stay consistent.
    Lock progressLock = new ReentrantLock();
    AtomicLong totalProgress = new AtomicLong(0);

    /*
     * Downloads the first chunk and gets the size of the data and etag if not specified by the user.
     */
    return getSetupMono(rangeReal, finalParallelTransferOptions, downloadRetryOptions, requestConditions,
        rangeGetContentMd5, context)
        .flatMap(setupTuple3 -> {
            long newCount = setupTuple3.getT1();
            BlobRequestConditions realConditions = setupTuple3.getT2();

            int numChunks = calculateNumBlocks(newCount, finalParallelTransferOptions.getBlockSize());
            // An empty blob still requires one pass so headers/properties flow through.
            numChunks = numChunks == 0 ?
1 : numChunks;
            BlobDownloadAsyncResponse initialResponse = setupTuple3.getT3();
            return Flux.range(0, numChunks)
                .flatMap(chunkNum -> {
                    // Chunk 0 was already fetched by the setup call; just write its body.
                    if (chunkNum == 0) {
                        return writeBodyToFile(initialResponse, file, 0, finalParallelTransferOptions,
                            progressLock, totalProgress);
                    }

                    // The final chunk may be smaller than the configured block size.
                    long chunkSizeActual = Math.min(finalParallelTransferOptions.getBlockSize(),
                        newCount - (chunkNum * finalParallelTransferOptions.getBlockSize()));
                    BlobRange chunkRange = new BlobRange(
                        rangeReal.getOffset() + (chunkNum * finalParallelTransferOptions.getBlockSize()),
                        chunkSizeActual);

                    return this.downloadWithResponse(chunkRange, downloadRetryOptions, realConditions,
                        rangeGetContentMd5, null)
                        .subscribeOn(Schedulers.elastic())
                        .flatMap(response -> writeBodyToFile(response, file, chunkNum,
                            finalParallelTransferOptions, progressLock, totalProgress));
                })
                .then(Mono.just(buildBlobPropertiesResponse(initialResponse)));
        });
}

// Ceiling division of dataSize by blockLength.
private int calculateNumBlocks(long dataSize, long blockLength) {
    int numBlocks = toIntExact(dataSize / blockLength);
    if (dataSize % blockLength != 0) {
        numBlocks++;
    }
    return numBlocks;
}

/*
Download the first chunk. Construct a Mono which will emit the total count for calculating the number of chunks,
access conditions containing the etag to lock on, and the response from downloading the first chunk.
*/
private Mono<Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse>> getSetupMono(BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) {
    // First chunk is capped by the requested count when that is smaller than a block.
    long initialChunkSize = range.getCount() != null
        && range.getCount() < parallelTransferOptions.getBlockSize() ?
range.getCount() : parallelTransferOptions.getBlockSize();
    return this.downloadWithResponse(new BlobRange(range.getOffset(), initialChunkSize), downloadRetryOptions,
        requestConditions, rangeGetContentMd5, context)
        .subscribeOn(Schedulers.elastic())
        .flatMap(response -> {
            /*
            Either the etag was set and it matches because the download succeeded, so this is a no-op, or there
            was no etag, so we set it here. ETag locking is vital to ensure we download one, consistent view
            of the file.
            */
            BlobRequestConditions newConditions = setEtag(requestConditions,
                response.getDeserializedHeaders().getETag());
            long totalLength = extractTotalBlobLength(response.getDeserializedHeaders().getContentRange());

            /*
            If the user either didn't specify a count or they specified a count greater than the size of the
            remaining data, take the size of the remaining data. This is to prevent the case where the count
            is much much larger than the size of the blob and we could try to download at an invalid offset.
            */
            long newCount = range.getCount() == null || range.getCount() > (totalLength - range.getOffset())
                ? totalLength - range.getOffset() : range.getCount();
            return Mono.zip(Mono.just(newCount), Mono.just(newConditions), Mono.just(response));
        })
        .onErrorResume(BlobStorageException.class, blobStorageException -> {
            /*
            In the case of an empty blob, we still want to report success and give back valid headers.
            Attempting a range download on an empty blob will return an InvalidRange error code and a
            Content-Range header of the format "bytes * /0". We need to double check that the total size is
            zero in the case that the customer has attempted an invalid range on a non-zero length blob. 
*/ if (blobStorageException.getErrorCode() == BlobErrorCode.INVALID_RANGE && extractTotalBlobLength(blobStorageException.getResponse() .getHeaders().getValue("Content-Range")) == 0) { return this.downloadWithResponse(new BlobRange(0, 0L), downloadRetryOptions, requestConditions, rangeGetContentMd5, context) .subscribeOn(Schedulers.elastic()) .flatMap(response -> { /* Ensure the blob is still 0 length by checking our download was the full length. (200 is for full blob; 206 is partial). */ if (response.getStatusCode() != 200) { Mono.error(new IllegalStateException("Blob was modified mid download. It was " + "originally 0 bytes and is now larger.")); } return Mono.zip(Mono.just(0L), Mono.just(requestConditions), Mono.just(response)); }); } return Mono.error(blobStorageException); }); } private static BlobRequestConditions setEtag(BlobRequestConditions accessConditions, String etag) { return new BlobRequestConditions() .setIfModifiedSince( accessConditions.getIfModifiedSince()) .setIfUnmodifiedSince( accessConditions.getIfModifiedSince()) .setIfMatch(etag) .setIfNoneMatch( accessConditions.getIfNoneMatch()) .setLeaseId(accessConditions.getLeaseId()); } private static Mono<Void> writeBodyToFile(BlobDownloadAsyncResponse response, AsynchronousFileChannel file, long chunkNum, ParallelTransferOptions optionsReal, Lock progressLock, AtomicLong totalProgress) { Flux<ByteBuffer> data = response.getValue(); data = ProgressReporter.addParallelProgressReporting(data, optionsReal.getProgressReceiver(), progressLock, totalProgress); return FluxUtil.writeFile(data, file, chunkNum * optionsReal.getBlockSize()); } private static long extractTotalBlobLength(String contentRange) { return Long.parseLong(contentRange.split("/")[1]); } private void downloadToFileCleanup(AsynchronousFileChannel channel, String filePath, SignalType signalType) { try { channel.close(); if (!signalType.equals(SignalType.ON_COMPLETE)) { Files.delete(Paths.get(filePath)); logger.verbose("Downloading to file 
failed. Cleaning up resources."); } } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.delete} * * <p>For more information, see the * <a href="https: * * @return A reactive response signalling completion. */ public Mono<Void> delete() { try { return deleteWithResponse(null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.deleteWithResponse * * <p>For more information, see the * <a href="https: * * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include} * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being * deleted, you must pass null. * @param accessConditions {@link BlobRequestConditions} * @return A reactive response signalling completion. */ public Mono<Response<Void>> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobRequestConditions accessConditions) { try { return withContext(context -> deleteWithResponse(deleteBlobSnapshotOptions, accessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobRequestConditions accessConditions, Context context) { accessConditions = accessConditions == null ? 
new BlobRequestConditions() : accessConditions;

        return this.azureBlobStorage.blobs().deleteWithRestResponseAsync(null, null, snapshot, null,
            accessConditions.getLeaseId(), deleteBlobSnapshotOptions, accessConditions.getIfModifiedSince(),
            accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
            accessConditions.getIfNoneMatch(), null, context)
            .map(response -> new SimpleResponse<>(response, null));
    }

    /**
     * Returns the blob's metadata and properties.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getProperties}
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @return A reactive response containing the blob properties and metadata.
     */
    public Mono<BlobProperties> getProperties() {
        try {
            return getPropertiesWithResponse(null).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Returns the blob's metadata and properties.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getPropertiesWithResponse
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @param accessConditions {@link BlobRequestConditions}
     * @return A reactive response containing the blob properties and metadata.
     */
    public Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobRequestConditions accessConditions) {
        try {
            return withContext(context -> getPropertiesWithResponse(accessConditions, context));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    // Package-private overload carrying the caller's Context; maps the raw REST headers into a BlobProperties.
    Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobRequestConditions accessConditions, Context context) {
        accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions;
        return this.azureBlobStorage.blobs().getPropertiesWithRestResponseAsync(
            null, null, snapshot, null, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(),
            accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
            accessConditions.getIfNoneMatch(), null, customerProvidedKey, context)
            .map(rb -> {
                BlobGetPropertiesHeaders hd = rb.getDeserializedHeaders();
                // NOTE: argument order below must match the BlobProperties constructor exactly; a null content
                // length is normalized to 0.
                BlobProperties properties = new BlobProperties(hd.getCreationTime(), hd.getLastModified(),
                    hd.getETag(), hd.getContentLength() == null ? 0 : hd.getContentLength(), hd.getContentType(),
                    hd.getContentMD5(), hd.getContentEncoding(), hd.getContentDisposition(),
                    hd.getContentLanguage(), hd.getCacheControl(), hd.getBlobSequenceNumber(), hd.getBlobType(),
                    hd.getLeaseStatus(), hd.getLeaseState(), hd.getLeaseDuration(), hd.getCopyId(),
                    hd.getCopyStatus(), hd.getCopySource(), hd.getCopyProgress(), hd.getCopyCompletionTime(),
                    hd.getCopyStatusDescription(), hd.isServerEncrypted(), hd.isIncrementalCopy(),
                    hd.getDestinationSnapshot(), AccessTier.fromString(hd.getAccessTier()),
                    hd.isAccessTierInferred(), ArchiveStatus.fromString(hd.getArchiveStatus()),
                    hd.getEncryptionKeySha256(), hd.getAccessTierChangeTime(), hd.getMetadata(),
                    hd.getBlobCommittedBlockCount());
                return new SimpleResponse<>(rb, properties);
            });
    }

    /**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
     * order to preserve existing values, they must be passed alongside the header being changed.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setHttpHeaders
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @param headers {@link BlobHttpHeaders}
     * @return A reactive response signalling completion.
     */
    public Mono<Void> setHttpHeaders(BlobHttpHeaders headers) {
        try {
            return setHttpHeadersWithResponse(headers, null).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
     * order to preserve existing values, they must be passed alongside the header being changed.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setHttpHeadersWithResponse
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @param headers {@link BlobHttpHeaders}
     * @param accessConditions {@link BlobRequestConditions}
     * @return A reactive response signalling completion.
     */
    public Mono<Response<Void>> setHttpHeadersWithResponse(BlobHttpHeaders headers,
        BlobRequestConditions accessConditions) {
        try {
            return withContext(context -> setHttpHeadersWithResponse(headers, accessConditions, context));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    // Package-private overload carrying the caller's Context through the pipeline.
    Mono<Response<Void>> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions accessConditions,
        Context context) {
        accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions;

        return this.azureBlobStorage.blobs().setHTTPHeadersWithRestResponseAsync(
            null, null, null, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(),
            accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
            accessConditions.getIfNoneMatch(), null, headers, context)
            .map(response -> new SimpleResponse<>(response, null));
    }

    /**
     * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
     * must be preserved, they must be downloaded and included in the call to this method.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setMetadata
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @param metadata Metadata to associate with the blob.
     * @return A reactive response signalling completion.
     */
    public Mono<Void> setMetadata(Map<String, String> metadata) {
        try {
            return setMetadataWithResponse(metadata, null).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
     * must be preserved, they must be downloaded and included in the call to this method.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setMetadataWithResponse
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @param metadata Metadata to associate with the blob.
     * @param accessConditions {@link BlobRequestConditions}
     * @return A reactive response signalling completion.
     */
    public Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata,
        BlobRequestConditions accessConditions) {
        try {
            return withContext(context -> setMetadataWithResponse(metadata, accessConditions, context));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    // Package-private overload carrying the caller's Context through the pipeline.
    Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions accessConditions,
        Context context) {
        accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions;

        return this.azureBlobStorage.blobs().setMetadataWithRestResponseAsync(
            null, null, null, metadata, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(),
            accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
            accessConditions.getIfNoneMatch(), null, customerProvidedKey, context)
            .map(response -> new SimpleResponse<>(response, null));
    }

    /**
     * Creates a read-only snapshot of the blob.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.createSnapshot}
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @return A response containing a {@link BlobAsyncClientBase} which is used to interact with the created snapshot,
     * use {@link
     */
    public Mono<BlobAsyncClientBase> createSnapshot() {
        try {
            return createSnapshotWithResponse(null, null).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Creates a read-only snapshot of the blob.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.createSnapshotWithResponse
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @param metadata Metadata to associate with the blob snapshot.
     * @param accessConditions {@link BlobRequestConditions}
     * @return A response containing a {@link BlobAsyncClientBase} which is used to interact with the created snapshot,
     * use {@link
     */
    public Mono<Response<BlobAsyncClientBase>> createSnapshotWithResponse(Map<String, String> metadata,
        BlobRequestConditions accessConditions) {
        try {
            return withContext(context -> createSnapshotWithResponse(metadata, accessConditions, context));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    // Package-private overload; the returned client is bound to the snapshot id echoed back by the service.
    Mono<Response<BlobAsyncClientBase>> createSnapshotWithResponse(Map<String, String> metadata,
        BlobRequestConditions accessConditions, Context context) {
        accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions;

        return this.azureBlobStorage.blobs().createSnapshotWithRestResponseAsync(
            null, null, null, metadata, accessConditions.getIfModifiedSince(),
            accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
            accessConditions.getIfNoneMatch(), accessConditions.getLeaseId(), null, customerProvidedKey, context)
            .map(rb -> new SimpleResponse<>(rb, this.getSnapshotClient(rb.getDeserializedHeaders().getSnapshot())));
    }

    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
     * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
     * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
     * etag.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setAccessTier
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @param tier The new tier for the blob.
     * @return A reactive response signalling completion.
     * @throws NullPointerException if {@code tier} is null.
     */
    public Mono<Void> setAccessTier(AccessTier tier) {
        try {
            return setAccessTierWithResponse(tier, null, null).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
     * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
     * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
     * etag.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setAccessTierWithResponse
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @param tier The new tier for the blob.
     * @param priority Optional priority to set for re-hydrating blobs.
     * @param leaseId The lease ID the active lease on the blob must match.
     * @return A reactive response signalling completion.
     * @throws NullPointerException if {@code tier} is null.
     */
    public Mono<Response<Void>> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId) {
        try {
            return withContext(context -> setTierWithResponse(tier, priority, leaseId, context));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    // Package-private overload; tier is the only required argument and is validated eagerly.
    Mono<Response<Void>> setTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
        Context context) {
        StorageImplUtils.assertNotNull("tier", tier);

        return this.azureBlobStorage.blobs().setTierWithRestResponseAsync(
            null, null, tier, null, priority, null, leaseId, context)
            .map(response -> new SimpleResponse<>(response, null));
    }

    /**
     * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.undelete}
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @return A reactive response signalling completion.
     */
    public Mono<Void> undelete() {
        try {
            return undeleteWithResponse().flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.undeleteWithResponse}
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @return A reactive response signalling completion.
     */
    public Mono<Response<Void>> undeleteWithResponse() {
        try {
            return withContext(this::undeleteWithResponse);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    // Package-private overload carrying the caller's Context through the pipeline.
    Mono<Response<Void>> undeleteWithResponse(Context context) {
        return this.azureBlobStorage.blobs().undeleteWithRestResponseAsync(null, null, context)
            .map(response -> new SimpleResponse<>(response, null));
    }

    /**
     * Returns the sku name and account kind for the account.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getAccountInfo}
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @return a reactor response containing the sku name and account kind.
     */
    public Mono<StorageAccountInfo> getAccountInfo() {
        try {
            return getAccountInfoWithResponse().flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Returns the sku name and account kind for the account.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getAccountInfoWithResponse}
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @return a reactor response containing the sku name and account kind.
     */
    public Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse() {
        try {
            return withContext(this::getAccountInfoWithResponse);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    // Package-private overload; extracts sku name and account kind from the response headers.
    Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse(Context context) {
        return this.azureBlobStorage.blobs().getAccountInfoWithRestResponseAsync(null, null, context)
            .map(rb -> {
                BlobGetAccountInfoHeaders hd = rb.getDeserializedHeaders();
                return new SimpleResponse<>(rb, new StorageAccountInfo(hd.getSkuName(), hd.getAccountKind()));
            });
    }
}
class BlobAsyncClientBase {
    // Default and maximum chunk sizes used by the parallel download-to-file path.
    private static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB;
    private static final int BLOB_MAX_DOWNLOAD_BLOCK_SIZE = 100 * Constants.MB;

    private final ClientLogger logger = new ClientLogger(BlobAsyncClientBase.class);

    // Generated REST client that performs all service calls.
    protected final AzureBlobStorageImpl azureBlobStorage;
    // Snapshot id this client is bound to; null means the base blob.
    private final String snapshot;
    // Customer-provided encryption key; null means service-managed encryption.
    private final CpkInfo customerProvidedKey;
    protected final String accountName;
    protected final String containerName;
    protected final String blobName;
    protected final BlobServiceVersion serviceVersion;

    /**
     * Package-private constructor for use by {@link SpecializedBlobClientBuilder}.
     *
     * @param pipeline The pipeline used to send and receive service requests.
     * @param url The endpoint where to send service requests.
     * @param serviceVersion The version of the service to receive requests.
     * @param accountName The storage account name.
     * @param containerName The container name.
     * @param blobName The blob name.
     * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
     * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     */
    protected BlobAsyncClientBase(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion,
        String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey) {
        this.azureBlobStorage = new AzureBlobStorageBuilder()
            .pipeline(pipeline)
            .url(url)
            .version(serviceVersion.getVersion())
            .build();
        this.serviceVersion = serviceVersion;

        this.accountName = accountName;
        this.containerName = containerName;
        // Decode-then-encode normalizes names that may arrive already percent-encoded.
        this.blobName = Utility.urlEncode(Utility.urlDecode(blobName));
        this.snapshot = snapshot;
        this.customerProvidedKey = customerProvidedKey;
    }

    /**
     * Creates a new {@link BlobAsyncClientBase} linked to the {@code snapshot} of this blob resource.
     *
     * @param snapshot the identifier for a specific snapshot of this blob
     * @return a {@link BlobAsyncClientBase} used to interact with the specific snapshot.
     */
    public BlobAsyncClientBase getSnapshotClient(String snapshot) {
        return new BlobAsyncClientBase(getHttpPipeline(), getBlobUrl(), getServiceVersion(), getAccountName(),
            getContainerName(), getBlobName(), snapshot, getCustomerProvidedKey());
    }

    /**
     * Gets the URL of the blob represented by this client.
     *
     * @return the URL.
     */
    public String getBlobUrl() {
        if (!this.isSnapshot()) {
            return azureBlobStorage.getUrl();
        } else {
            // Append the snapshot query parameter, respecting any existing query string.
            if (azureBlobStorage.getUrl().contains("?")) {
                return String.format("%s&snapshot=%s", azureBlobStorage.getUrl(), snapshot);
            } else {
                return String.format("%s?snapshot=%s", azureBlobStorage.getUrl(), snapshot);
            }
        }
    }

    /**
     * Get the container name.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getContainerName}
     *
     * @return The name of the container.
     */
    public final String getContainerName() {
        return containerName;
    }

    /**
     * Decodes and gets the blob name.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getBlobName}
     *
     * @return The decoded name of the blob.
     */
    public final String getBlobName() {
        return (blobName == null) ? null : Utility.urlDecode(blobName);
    }

    /**
     * Gets the {@link HttpPipeline} powering this client.
     *
     * @return The pipeline.
     */
    public HttpPipeline getHttpPipeline() {
        return azureBlobStorage.getHttpPipeline();
    }

    /**
     * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
     *
     * @return the customer provided key used for encryption.
     */
    public CpkInfo getCustomerProvidedKey() {
        return customerProvidedKey;
    }

    /**
     * Get associated account name.
     *
     * @return account name associated with this storage resource.
     */
    public String getAccountName() {
        return accountName;
    }

    /**
     * Gets the service version the client is using.
     *
     * @return the service version the client is using.
     */
    public BlobServiceVersion getServiceVersion() {
        return serviceVersion;
    }

    /**
     * Gets the snapshotId for a blob resource
     *
     * @return A string that represents the snapshotId of the snapshot blob
     */
    public String getSnapshotId() {
        return this.snapshot;
    }

    /**
     * Determines if a blob is a snapshot
     *
     * @return A boolean that indicates if a blob is a snapshot
     */
    public boolean isSnapshot() {
        return this.snapshot != null;
    }

    /**
     * Determines if the blob this client represents exists in the cloud.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.exists}
     *
     * @return true if the blob exists, false if it doesn't
     */
    public Mono<Boolean> exists() {
        try {
            return existsWithResponse().flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Determines if the blob this client represents exists in the cloud.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.existsWithResponse}
     *
     * @return true if the blob exists, false if it doesn't
     */
    public Mono<Response<Boolean>> existsWithResponse() {
        try {
            return withContext(this::existsWithResponse);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    // Package-private overload: a successful properties call means the blob exists; a 404 is mapped to a
    // false-valued response rather than an error.
    Mono<Response<Boolean>> existsWithResponse(Context context) {
        return this.getPropertiesWithResponse(null, context)
            .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true))
            .onErrorResume(t -> t instanceof BlobStorageException
                    && ((BlobStorageException) t).getStatusCode() == 404,
                t -> {
                    HttpResponse response = ((BlobStorageException) t).getResponse();
                    return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                        response.getHeaders(), false));
                });
    }

    /**
     * Copies the data at the source URL to a blob.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopy
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
     * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
     * is used.
     * @return A {@link PollerFlux} that polls the blob copy operation until it has completed, has failed, or has been
     * cancelled.
     */
    public PollerFlux<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
        return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
    }

    /**
     * Copies the data at the source URL to a blob.
     *
     * <p><strong>Starting a copy operation</strong></p>
     * Starting a copy operation and polling on the responses.
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopy
     *
     * <p><strong>Cancelling a copy operation</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopyFromUrlCancel
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
     * @param metadata Metadata to associate with the destination blob.
     * @param tier {@link AccessTier} for the destination blob.
     * @param priority {@link RehydratePriority} for rehydrating the blob.
     * @param sourceModifiedAccessConditions {@link RequestConditions} against the source. Standard HTTP
     * Access conditions related to the modification of data. ETag and LastModifiedTime are used to construct
     * conditions related to when the blob was changed relative to the given request. The request will fail if the
     * specified condition is not satisfied.
     * @param destAccessConditions {@link BlobRequestConditions} against the destination.
     * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
     * is used.
     * @return A {@link PollerFlux} that polls the blob copy operation until it has completed, has failed, or has been
     * cancelled.
     */
    public PollerFlux<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
        RehydratePriority priority, RequestConditions sourceModifiedAccessConditions,
        BlobRequestConditions destAccessConditions, Duration pollInterval) {

        final Duration interval = pollInterval != null ? pollInterval : Duration.ofSeconds(1);
        final RequestConditions sourceModifiedCondition = sourceModifiedAccessConditions == null
            ? new RequestConditions()
            : sourceModifiedAccessConditions;
        final BlobRequestConditions destinationAccessConditions = destAccessConditions == null
            ? new BlobRequestConditions()
            : destAccessConditions;

        // Defensive copy of the caller's source conditions so later mutation cannot affect the poller.
        final RequestConditions sourceConditions = new RequestConditions()
            .setIfModifiedSince(sourceModifiedCondition.getIfModifiedSince())
            .setIfUnmodifiedSince(sourceModifiedCondition.getIfUnmodifiedSince())
            .setIfMatch(sourceModifiedCondition.getIfMatch())
            .setIfNoneMatch(sourceModifiedCondition.getIfNoneMatch());

        // PollerFlux arguments: activation (start the copy), poll (check status), cancel (abort by copy id),
        // and fetch-result (no result payload for a copy).
        return new PollerFlux<>(interval,
            (pollingContext) -> {
                try {
                    return onStart(sourceUrl, metadata, tier, priority, sourceConditions,
                        destinationAccessConditions);
                } catch (RuntimeException ex) {
                    return monoError(logger, ex);
                }
            },
            (pollingContext) -> {
                try {
                    return onPoll(pollingContext.getLatestResponse());
                } catch (RuntimeException ex) {
                    return monoError(logger, ex);
                }
            },
            (pollingContext, firstResponse) -> {
                if (firstResponse == null || firstResponse.getValue() == null) {
                    return Mono.error(logger.logExceptionAsError(
                        new IllegalArgumentException("Cannot cancel a poll response that never started.")));
                }
                final String copyIdentifier = firstResponse.getValue().getCopyId();

                if (!ImplUtils.isNullOrEmpty(copyIdentifier)) {
                    logger.info("Cancelling copy operation for copy id: {}", copyIdentifier);
                    return abortCopyFromUrl(copyIdentifier).thenReturn(firstResponse.getValue());
                }

                return Mono.empty();
            },
            (pollingContext) -> Mono.empty());
    }

    // Activation step of the copy poller: validates the source URL and issues Start Copy From URL, mapping the
    // response headers into the initial BlobCopyInfo.
    private Mono<BlobCopyInfo> onStart(String sourceUrl, Map<String, String> metadata, AccessTier tier,
        RehydratePriority priority, RequestConditions sourceModifiedAccessConditions,
        BlobRequestConditions destinationAccessConditions) {
        URL url;
        try {
            url = new URL(sourceUrl);
        } catch (MalformedURLException ex) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url.", ex));
        }

        return withContext(
            context -> azureBlobStorage.blobs().startCopyFromURLWithRestResponseAsync(null, null, url, null, metadata,
                tier, priority, sourceModifiedAccessConditions.getIfModifiedSince(),
                sourceModifiedAccessConditions.getIfUnmodifiedSince(), sourceModifiedAccessConditions.getIfMatch(),
                sourceModifiedAccessConditions.getIfNoneMatch(), destinationAccessConditions.getIfModifiedSince(),
                destinationAccessConditions.getIfUnmodifiedSince(), destinationAccessConditions.getIfMatch(),
                destinationAccessConditions.getIfNoneMatch(), destinationAccessConditions.getLeaseId(), null, context))
            .map(response -> {
                final BlobStartCopyFromURLHeaders headers = response.getDeserializedHeaders();
                return new BlobCopyInfo(sourceUrl, headers.getCopyId(), headers.getCopyStatus(), headers.getETag(),
                    headers.getLastModified(), headers.getErrorCode());
            });
    }

    // Poll step: short-circuits terminal states, otherwise reads the blob's copy status from its properties and
    // maps it onto a long-running-operation status. Poll failures return the last known info rather than erroring.
    private Mono<PollResponse<BlobCopyInfo>> onPoll(PollResponse<BlobCopyInfo> pollResponse) {
        if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
            || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) {
            return Mono.just(pollResponse);
        }

        final BlobCopyInfo lastInfo = pollResponse.getValue();
        if (lastInfo == null) {
            logger.warning("BlobCopyInfo does not exist. Activation operation failed.");
            return Mono.just(new PollResponse<>(
                LongRunningOperationStatus.fromString("COPY_START_FAILED", true), null));
        }

        return getProperties().map(response -> {
            final CopyStatusType status = response.getCopyStatus();
            final BlobCopyInfo result = new BlobCopyInfo(response.getCopySource(), response.getCopyId(), status,
                response.getETag(), response.getCopyCompletionTime(), response.getCopyStatusDescription());

            LongRunningOperationStatus operationStatus;
            switch (status) {
                case SUCCESS:
                    operationStatus = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                    break;
                case FAILED:
                    operationStatus = LongRunningOperationStatus.FAILED;
                    break;
                case ABORTED:
                    operationStatus = LongRunningOperationStatus.USER_CANCELLED;
                    break;
                case PENDING:
                    operationStatus = LongRunningOperationStatus.IN_PROGRESS;
                    break;
                default:
                    throw logger.logExceptionAsError(new IllegalArgumentException(
                        "CopyStatusType is not supported. Status: " + status));
            }

            return new PollResponse<>(operationStatus, result);
        }).onErrorReturn(
            new PollResponse<>(LongRunningOperationStatus.fromString("POLLING_FAILED", true), lastInfo));
    }

    /**
     * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.abortCopyFromUrl
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @see
     * @see
     * @see
     * @param copyId The id of the copy operation to abort.
     * @return A reactive response signalling completion.
     */
    public Mono<Void> abortCopyFromUrl(String copyId) {
        try {
            return abortCopyFromUrlWithResponse(copyId, null).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.abortCopyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @see * @see * @see * @param copyId The id of the copy operation to abort. * @param leaseId The lease ID the active lease on the blob must match. * @return A reactive response signalling completion. */ public Mono<Response<Void>> abortCopyFromUrlWithResponse(String copyId, String leaseId) { try { return withContext(context -> abortCopyFromUrlWithResponse(copyId, leaseId, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> abortCopyFromUrlWithResponse(String copyId, String leaseId, Context context) { return this.azureBlobStorage.blobs().abortCopyFromURLWithRestResponseAsync( null, null, copyId, null, leaseId, null, context) .map(response -> new SimpleResponse<>(response, null)); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.copyFromUrl * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. * @return A reactive response containing the copy ID for the long running operation. */ public Mono<String> copyFromUrl(String copySource) { try { return copyFromUrlWithResponse(copySource, null, null, null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.copyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. 
URLs outside of Azure may only be copied to block blobs. * @param metadata Metadata to associate with the destination blob. * @param tier {@link AccessTier} for the destination blob. * @param sourceModifiedAccessConditions {@link RequestConditions} against the source. Standard HTTP Access * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. The request will fail if the specified * condition is not satisfied. * @param destAccessConditions {@link BlobRequestConditions} against the destination. * @return A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier, RequestConditions sourceModifiedAccessConditions, BlobRequestConditions destAccessConditions) { try { return withContext(context -> copyFromUrlWithResponse(copySource, metadata, tier, sourceModifiedAccessConditions, destAccessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<String>> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier, RequestConditions sourceModifiedAccessConditions, BlobRequestConditions destAccessConditions, Context context) { sourceModifiedAccessConditions = sourceModifiedAccessConditions == null ? new RequestConditions() : sourceModifiedAccessConditions; destAccessConditions = destAccessConditions == null ? 
new BlobRequestConditions() : destAccessConditions; URL url; try { url = new URL(copySource); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalArgumentException("'copySource' is not a valid url.")); } return this.azureBlobStorage.blobs().copyFromURLWithRestResponseAsync( null, null, url, null, metadata, tier, sourceModifiedAccessConditions.getIfModifiedSince(), sourceModifiedAccessConditions.getIfUnmodifiedSince(), sourceModifiedAccessConditions.getIfMatch(), sourceModifiedAccessConditions.getIfNoneMatch(), destAccessConditions.getIfModifiedSince(), destAccessConditions.getIfUnmodifiedSince(), destAccessConditions.getIfMatch(), destAccessConditions.getIfNoneMatch(), destAccessConditions.getLeaseId(), null, context) .map(rb -> new SimpleResponse<>(rb, rb.getDeserializedHeaders().getCopyId())); } /** * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or * {@link AppendBlobClient}. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.download} * * <p>For more information, see the * <a href="https: * * @return A reactive response containing the blob data. */ public Flux<ByteBuffer> download() { try { return downloadWithResponse(null, null, null, false) .flatMapMany(BlobDownloadAsyncResponse::getValue); } catch (RuntimeException ex) { return fluxError(logger, ex); } } /** * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link * PageBlobClient}, or {@link AppendBlobClient}. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadWithResponse * * <p>For more information, see the * <a href="https: * * @param range {@link BlobRange} * @param options {@link DownloadRetryOptions} * @param accessConditions {@link BlobRequestConditions} * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned. * @return A reactive response containing the blob data. */ public Mono<BlobDownloadAsyncResponse> downloadWithResponse(BlobRange range, DownloadRetryOptions options, BlobRequestConditions accessConditions, boolean rangeGetContentMD5) { try { return withContext(context -> downloadWithResponse(range, options, accessConditions, rangeGetContentMD5, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<BlobDownloadAsyncResponse> downloadWithResponse(BlobRange range, DownloadRetryOptions options, BlobRequestConditions accessConditions, boolean rangeGetContentMD5, Context context) { return downloadHelper(range, options, accessConditions, rangeGetContentMD5, context) .map(response -> new BlobDownloadAsyncResponse(response.getRequest(), response.getStatusCode(), response.getHeaders(), response.getValue(), response.getDeserializedHeaders())); } private Mono<ReliableDownload> downloadHelper(BlobRange range, DownloadRetryOptions options, BlobRequestConditions accessConditions, boolean rangeGetContentMd5, Context context) { range = range == null ? new BlobRange(0) : range; Boolean getMD5 = rangeGetContentMd5 ? rangeGetContentMd5 : null; accessConditions = accessConditions == null ? 
new BlobRequestConditions() : accessConditions; HttpGetterInfo info = new HttpGetterInfo() .setOffset(range.getOffset()) .setCount(range.getCount()) .setETag(accessConditions.getIfMatch()); return azureBlobStorage.blobs().downloadWithRestResponseAsync(null, null, snapshot, null, range.toHeaderValue(), accessConditions.getLeaseId(), getMD5, null, accessConditions.getIfModifiedSince(), accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(), null, customerProvidedKey, context) .map(response -> { info.setETag(response.getDeserializedHeaders().getETag()); return new ReliableDownload(response, options, info, updatedInfo -> downloadHelper(new BlobRange(updatedInfo.getOffset(), updatedInfo.getCount()), options, new BlobRequestConditions().setIfMatch(info.getETag()), false, context)); }); } /** * Downloads the entire blob into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p>Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link * AppendBlobClient}.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadToFile * * <p>For more information, see the * <a href="https: * * @param filePath A non-null {@link OutputStream} instance where the downloaded data will be written. * @return A reactive response containing the blob properties and metadata. */ public Mono<BlobProperties> downloadToFile(String filePath) { try { return downloadToFileWithResponse(filePath, null, null, null, null, false).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Downloads the entire blob into a file specified by the path. 
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p>Uploading data must be done from the {@link BlockBlobClient}, {@link
 * PageBlobClient}, or {@link AppendBlobClient}.</p>
 *
 * <p>This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra
 * call, provide the {@link BlobRange} parameter.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadToFileWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param filePath A non-null {@code String} specifying the path of the file to which the downloaded data will be
 * written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @return A reactive response containing the blob properties and metadata.
 * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB.
 * @throws UncheckedIOException If an I/O error occurs.
*/ public Mono<Response<BlobProperties>> downloadToFileWithResponse(String filePath, BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean rangeGetContentMd5) { try { return withContext(context -> downloadToFileWithResponse(filePath, range, parallelTransferOptions, options, requestConditions, rangeGetContentMd5, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<BlobProperties>> downloadToFileWithResponse(String filePath, BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) { BlobRange finalRange = range == null ? new BlobRange(0) : range; final ParallelTransferOptions finalParallelTransferOptions = ModelHelper.populateAndApplyDefaults(parallelTransferOptions); BlobRequestConditions finalConditions = requestConditions == null ? 
new BlobRequestConditions() : requestConditions; AsynchronousFileChannel channel = downloadToFileResourceSupplier(filePath); return Mono.just(channel) .flatMap(c -> this.downloadToFileImpl(c, finalRange, finalParallelTransferOptions, downloadRetryOptions, finalConditions, rangeGetContentMd5, context)) .doFinally(signalType -> this.downloadToFileCleanup(channel, filePath, signalType)); } private AsynchronousFileChannel downloadToFileResourceSupplier(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Mono<Response<BlobProperties>> downloadToFileImpl(AsynchronousFileChannel file, BlobRange finalRange, ParallelTransferOptions finalParallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) { Lock progressLock = new ReentrantLock(); AtomicLong totalProgress = new AtomicLong(0); /* * Downloads the first chunk and gets the size of the data and etag if not specified by the user. */ return getSetupMono(finalRange, finalParallelTransferOptions, downloadRetryOptions, requestConditions, rangeGetContentMd5, context) .flatMap(setupTuple3 -> { long newCount = setupTuple3.getT1(); BlobRequestConditions finalConditions = setupTuple3.getT2(); int numChunks = calculateNumBlocks(newCount, finalParallelTransferOptions.getBlockSize()); numChunks = numChunks == 0 ? 
1 : numChunks; BlobDownloadAsyncResponse initialResponse = setupTuple3.getT3(); return Flux.range(0, numChunks) .flatMap(chunkNum -> { if (chunkNum == 0) { return writeBodyToFile(initialResponse, file, 0, finalParallelTransferOptions, progressLock, totalProgress); } long chunkSizeActual = Math.min(finalParallelTransferOptions.getBlockSize(), newCount - (chunkNum.longValue() * finalParallelTransferOptions.getBlockSize().longValue())); BlobRange chunkRange = new BlobRange( finalRange.getOffset() + (chunkNum.longValue() * finalParallelTransferOptions.getBlockSize().longValue()), chunkSizeActual); return this.downloadWithResponse(chunkRange, downloadRetryOptions, finalConditions, rangeGetContentMd5, null) .subscribeOn(Schedulers.elastic()) .flatMap(response -> writeBodyToFile(response, file, chunkNum, finalParallelTransferOptions, progressLock, totalProgress)); }) .then(Mono.just(buildBlobPropertiesResponse(initialResponse))); }); } private int calculateNumBlocks(long dataSize, long blockLength) { int numBlocks = toIntExact(dataSize / blockLength); if (dataSize % blockLength != 0) { numBlocks++; } return numBlocks; } /* Download the first chunk. Construct a Mono which will emit the total count for calculating the number of chunks, access conditions containing the etag to lock on, and the response from downloading the first chunk. */ private Mono<Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse>> getSetupMono(BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) { long initialChunkSize = range.getCount() != null && range.getCount() < parallelTransferOptions.getBlockSize() ? 
range.getCount() : parallelTransferOptions.getBlockSize(); return this.downloadWithResponse(new BlobRange(range.getOffset(), initialChunkSize), downloadRetryOptions, requestConditions, rangeGetContentMd5, context) .subscribeOn(Schedulers.elastic()) .flatMap(response -> { /* Either the etag was set and it matches because the download succeeded, so this is a no-op, or there was no etag, so we set it here. ETag locking is vital to ensure we download one, consistent view of the file. */ BlobRequestConditions newConditions = setEtag(requestConditions, response.getDeserializedHeaders().getETag()); long totalLength = extractTotalBlobLength(response.getDeserializedHeaders().getContentRange()); /* If the user either didn't specify a count or they specified a count greater than the size of the remaining data, take the size of the remaining data. This is to prevent the case where the count is much much larger than the size of the blob and we could try to download at an invalid offset. */ long newCount = range.getCount() == null || range.getCount() > (totalLength - range.getOffset()) ? totalLength - range.getOffset() : range.getCount(); return Mono.zip(Mono.just(newCount), Mono.just(newConditions), Mono.just(response)); }) .onErrorResume(BlobStorageException.class, blobStorageException -> { /* In the case of an empty blob, we still want to report success and give back valid headers. Attempting a range download on an empty blob will return an InvalidRange error code and a Content-Range header of the format "bytes * /0". We need to double check that the total size is zero in the case that the customer has attempted an invalid range on a non-zero length blob. 
*/ if (blobStorageException.getErrorCode() == BlobErrorCode.INVALID_RANGE && extractTotalBlobLength(blobStorageException.getResponse() .getHeaders().getValue("Content-Range")) == 0) { return this.downloadWithResponse(new BlobRange(0, 0L), downloadRetryOptions, requestConditions, rangeGetContentMd5, context) .subscribeOn(Schedulers.elastic()) .flatMap(response -> { /* Ensure the blob is still 0 length by checking our download was the full length. (200 is for full blob; 206 is partial). */ if (response.getStatusCode() != 200) { Mono.error(new IllegalStateException("Blob was modified mid download. It was " + "originally 0 bytes and is now larger.")); } return Mono.zip(Mono.just(0L), Mono.just(requestConditions), Mono.just(response)); }); } return Mono.error(blobStorageException); }); } private static BlobRequestConditions setEtag(BlobRequestConditions accessConditions, String etag) { return new BlobRequestConditions() .setIfModifiedSince( accessConditions.getIfModifiedSince()) .setIfUnmodifiedSince( accessConditions.getIfModifiedSince()) .setIfMatch(etag) .setIfNoneMatch( accessConditions.getIfNoneMatch()) .setLeaseId(accessConditions.getLeaseId()); } private static Mono<Void> writeBodyToFile(BlobDownloadAsyncResponse response, AsynchronousFileChannel file, long chunkNum, ParallelTransferOptions finalParallelTransferOptions, Lock progressLock, AtomicLong totalProgress) { Flux<ByteBuffer> data = response.getValue(); data = ProgressReporter.addParallelProgressReporting(data, finalParallelTransferOptions.getProgressReceiver(), progressLock, totalProgress); return FluxUtil.writeFile(data, file, chunkNum * finalParallelTransferOptions.getBlockSize()); } private static long extractTotalBlobLength(String contentRange) { return Long.parseLong(contentRange.split("/")[1]); } private void downloadToFileCleanup(AsynchronousFileChannel channel, String filePath, SignalType signalType) { try { channel.close(); if (!signalType.equals(SignalType.ON_COMPLETE)) { 
Files.delete(Paths.get(filePath)); logger.verbose("Downloading to file failed. Cleaning up resources."); } } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.delete} * * <p>For more information, see the * <a href="https: * * @return A reactive response signalling completion. */ public Mono<Void> delete() { try { return deleteWithResponse(null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.deleteWithResponse * * <p>For more information, see the * <a href="https: * * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include} * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being * deleted, you must pass null. * @param accessConditions {@link BlobRequestConditions} * @return A reactive response signalling completion. */ public Mono<Response<Void>> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobRequestConditions accessConditions) { try { return withContext(context -> deleteWithResponse(deleteBlobSnapshotOptions, accessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobRequestConditions accessConditions, Context context) { accessConditions = accessConditions == null ? 
new BlobRequestConditions() : accessConditions; return this.azureBlobStorage.blobs().deleteWithRestResponseAsync(null, null, snapshot, null, accessConditions.getLeaseId(), deleteBlobSnapshotOptions, accessConditions.getIfModifiedSince(), accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(), null, context) .map(response -> new SimpleResponse<>(response, null)); } /** * Returns the blob's metadata and properties. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getProperties} * * <p>For more information, see the * <a href="https: * * @return A reactive response containing the blob properties and metadata. */ public Mono<BlobProperties> getProperties() { try { return getPropertiesWithResponse(null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Returns the blob's metadata and properties. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param accessConditions {@link BlobRequestConditions} * @return A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobRequestConditions accessConditions) { try { return withContext(context -> getPropertiesWithResponse(accessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobRequestConditions accessConditions, Context context) { accessConditions = accessConditions == null ? 
new BlobRequestConditions() : accessConditions; return this.azureBlobStorage.blobs().getPropertiesWithRestResponseAsync( null, null, snapshot, null, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(), accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(), null, customerProvidedKey, context) .map(rb -> { BlobGetPropertiesHeaders hd = rb.getDeserializedHeaders(); BlobProperties properties = new BlobProperties(hd.getCreationTime(), hd.getLastModified(), hd.getETag(), hd.getContentLength() == null ? 0 : hd.getContentLength(), hd.getContentType(), hd.getContentMD5(), hd.getContentEncoding(), hd.getContentDisposition(), hd.getContentLanguage(), hd.getCacheControl(), hd.getBlobSequenceNumber(), hd.getBlobType(), hd.getLeaseStatus(), hd.getLeaseState(), hd.getLeaseDuration(), hd.getCopyId(), hd.getCopyStatus(), hd.getCopySource(), hd.getCopyProgress(), hd.getCopyCompletionTime(), hd.getCopyStatusDescription(), hd.isServerEncrypted(), hd.isIncrementalCopy(), hd.getDestinationSnapshot(), AccessTier.fromString(hd.getAccessTier()), hd.isAccessTierInferred(), ArchiveStatus.fromString(hd.getArchiveStatus()), hd.getEncryptionKeySha256(), hd.getAccessTierChangeTime(), hd.getMetadata(), hd.getBlobCommittedBlockCount()); return new SimpleResponse<>(rb, properties); }); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In * order to preserve existing values, they must be passed alongside the header being changed. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param headers {@link BlobHttpHeaders} * @return A reactive response signalling completion. 
*/ public Mono<Void> setHttpHeaders(BlobHttpHeaders headers) { try { return setHttpHeadersWithResponse(headers, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In * order to preserve existing values, they must be passed alongside the header being changed. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setHttpHeadersWithResponse * * <p>For more information, see the * <a href="https: * * @param headers {@link BlobHttpHeaders} * @param accessConditions {@link BlobRequestConditions} * @return A reactive response signalling completion. */ public Mono<Response<Void>> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions accessConditions) { try { return withContext(context -> setHttpHeadersWithResponse(headers, accessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions accessConditions, Context context) { accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions; return this.azureBlobStorage.blobs().setHTTPHeadersWithRestResponseAsync( null, null, null, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(), accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(), null, headers, context) .map(response -> new SimpleResponse<>(response, null)); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values * must be preserved, they must be downloaded and included in the call to this method. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob. * @return A reactive response signalling completion. */ public Mono<Void> setMetadata(Map<String, String> metadata) { try { return setMetadataWithResponse(metadata, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values * must be preserved, they must be downloaded and included in the call to this method. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob. * @param accessConditions {@link BlobRequestConditions} * @return A reactive response signalling completion. */ public Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions accessConditions) { try { return withContext(context -> setMetadataWithResponse(metadata, accessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions accessConditions, Context context) { accessConditions = accessConditions == null ? 
new BlobRequestConditions() : accessConditions; return this.azureBlobStorage.blobs().setMetadataWithRestResponseAsync( null, null, null, metadata, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(), accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(), null, customerProvidedKey, context) .map(response -> new SimpleResponse<>(response, null)); } /** * Creates a read-only snapshot of the blob. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.createSnapshot} * * <p>For more information, see the * <a href="https: * * @return A response containing a {@link BlobAsyncClientBase} which is used to interact with the created snapshot, * use {@link */ public Mono<BlobAsyncClientBase> createSnapshot() { try { return createSnapshotWithResponse(null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a read-only snapshot of the blob. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.createSnapshotWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob snapshot. * @param accessConditions {@link BlobRequestConditions} * @return A response containing a {@link BlobAsyncClientBase} which is used to interact with the created snapshot, * use {@link */ public Mono<Response<BlobAsyncClientBase>> createSnapshotWithResponse(Map<String, String> metadata, BlobRequestConditions accessConditions) { try { return withContext(context -> createSnapshotWithResponse(metadata, accessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<BlobAsyncClientBase>> createSnapshotWithResponse(Map<String, String> metadata, BlobRequestConditions accessConditions, Context context) { accessConditions = accessConditions == null ? 
new BlobRequestConditions() : accessConditions; return this.azureBlobStorage.blobs().createSnapshotWithRestResponseAsync( null, null, null, metadata, accessConditions.getIfModifiedSince(), accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(), accessConditions.getLeaseId(), null, customerProvidedKey, context) .map(rb -> new SimpleResponse<>(rb, this.getSnapshotClient(rb.getDeserializedHeaders().getSnapshot()))); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setAccessTier * * <p>For more information, see the * <a href="https: * * @param tier The new tier for the blob. * @return A reactive response signalling completion. * @throws NullPointerException if {@code tier} is null. */ public Mono<Void> setAccessTier(AccessTier tier) { try { return setAccessTierWithResponse(tier, null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setAccessTierWithResponse * * <p>For more information, see the * <a href="https: * * @param tier The new tier for the blob. 
* @param priority Optional priority to set for re-hydrating blobs. * @param leaseId The lease ID the active lease on the blob must match. * @return A reactive response signalling completion. * @throws NullPointerException if {@code tier} is null. */ public Mono<Response<Void>> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId) { try { return withContext(context -> setTierWithResponse(tier, priority, leaseId, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> setTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId, Context context) { StorageImplUtils.assertNotNull("tier", tier); return this.azureBlobStorage.blobs().setTierWithRestResponseAsync( null, null, tier, null, priority, null, leaseId, context) .map(response -> new SimpleResponse<>(response, null)); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.undelete} * * <p>For more information, see the * <a href="https: * * @return A reactive response signalling completion. */ public Mono<Void> undelete() { try { return undeleteWithResponse().flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.undeleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A reactive response signalling completion. 
*/ public Mono<Response<Void>> undeleteWithResponse() { try { return withContext(this::undeleteWithResponse); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> undeleteWithResponse(Context context) { return this.azureBlobStorage.blobs().undeleteWithRestResponseAsync(null, null, context).map(response -> new SimpleResponse<>(response, null)); } /** * Returns the sku name and account kind for the account. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getAccountInfo} * * <p>For more information, see the * <a href="https: * * @return a reactor response containing the sku name and account kind. */ public Mono<StorageAccountInfo> getAccountInfo() { try { return getAccountInfoWithResponse().flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Returns the sku name and account kind for the account. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getAccountInfoWithResponse} * * <p>For more information, see the * <a href="https: * * @return a reactor response containing the sku name and account kind. */ public Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse() { try { return withContext(this::getAccountInfoWithResponse); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse(Context context) { return this.azureBlobStorage.blobs().getAccountInfoWithRestResponseAsync(null, null, context) .map(rb -> { BlobGetAccountInfoHeaders hd = rb.getDeserializedHeaders(); return new SimpleResponse<>(rb, new StorageAccountInfo(hd.getSkuName(), hd.getAccountKind())); }); } }
This helper has a single call site; it exists to encapsulate the lengthy BlobProperties constructor invocation. Building the response inside the method also keeps the response construction out of the caller, which reads more concisely.
/**
 * Builds a {@link Response} of {@link BlobProperties} from the deserialized headers of a blob download
 * response. Properties that download headers do not carry are passed as {@code null}; a missing content
 * length maps to {@code 0}. Exists solely to keep this sprawling constructor call out of the download path.
 */
private static Response<BlobProperties> buildBlobPropertiesResponse(BlobDownloadAsyncResponse downloadResponse) {
    // Hoist the boxed length so the null-check and the fallback read the same value once.
    Long headerContentLength = downloadResponse.getDeserializedHeaders().getContentLength();
    long contentLength = headerContentLength == null ? 0 : headerContentLength;
    BlobProperties properties = new BlobProperties(
        null,
        downloadResponse.getDeserializedHeaders().getLastModified(),
        downloadResponse.getDeserializedHeaders().getETag(),
        contentLength,
        downloadResponse.getDeserializedHeaders().getContentType(),
        null,
        downloadResponse.getDeserializedHeaders().getContentEncoding(),
        downloadResponse.getDeserializedHeaders().getContentDisposition(),
        downloadResponse.getDeserializedHeaders().getContentLanguage(),
        downloadResponse.getDeserializedHeaders().getCacheControl(),
        downloadResponse.getDeserializedHeaders().getBlobSequenceNumber(),
        downloadResponse.getDeserializedHeaders().getBlobType(),
        downloadResponse.getDeserializedHeaders().getLeaseStatus(),
        downloadResponse.getDeserializedHeaders().getLeaseState(),
        downloadResponse.getDeserializedHeaders().getLeaseDuration(),
        downloadResponse.getDeserializedHeaders().getCopyId(),
        downloadResponse.getDeserializedHeaders().getCopyStatus(),
        downloadResponse.getDeserializedHeaders().getCopySource(),
        downloadResponse.getDeserializedHeaders().getCopyProgress(),
        downloadResponse.getDeserializedHeaders().getCopyCompletionTime(),
        downloadResponse.getDeserializedHeaders().getCopyStatusDescription(),
        downloadResponse.getDeserializedHeaders().isServerEncrypted(),
        null, null, null, null, null,
        downloadResponse.getDeserializedHeaders().getEncryptionKeySha256(),
        null,
        downloadResponse.getDeserializedHeaders().getMetadata(),
        downloadResponse.getDeserializedHeaders().getBlobCommittedBlockCount());
    return new SimpleResponse<>(downloadResponse.getRequest(), downloadResponse.getStatusCode(),
        downloadResponse.getHeaders(), properties);
}
BlobProperties properties = new BlobProperties(null, response.getDeserializedHeaders().getLastModified(),
/**
 * Builds a {@link Response} of {@link BlobProperties} from the deserialized headers of a blob download
 * response. Positional {@code null} arguments are properties that download headers do not expose;
 * a {@code null} content length maps to {@code 0}.
 */
private static Response<BlobProperties> buildBlobPropertiesResponse(BlobDownloadAsyncResponse response) {
    BlobProperties properties = new BlobProperties(null, response.getDeserializedHeaders().getLastModified(),
        response.getDeserializedHeaders().getETag(),
        // Content length defaults to 0 when the header is absent.
        response.getDeserializedHeaders().getContentLength() == null
            ? 0 : response.getDeserializedHeaders().getContentLength(),
        response.getDeserializedHeaders().getContentType(), null,
        response.getDeserializedHeaders().getContentEncoding(),
        response.getDeserializedHeaders().getContentDisposition(),
        response.getDeserializedHeaders().getContentLanguage(),
        response.getDeserializedHeaders().getCacheControl(),
        response.getDeserializedHeaders().getBlobSequenceNumber(),
        response.getDeserializedHeaders().getBlobType(),
        response.getDeserializedHeaders().getLeaseStatus(),
        response.getDeserializedHeaders().getLeaseState(),
        response.getDeserializedHeaders().getLeaseDuration(),
        response.getDeserializedHeaders().getCopyId(),
        response.getDeserializedHeaders().getCopyStatus(),
        response.getDeserializedHeaders().getCopySource(),
        response.getDeserializedHeaders().getCopyProgress(),
        response.getDeserializedHeaders().getCopyCompletionTime(),
        response.getDeserializedHeaders().getCopyStatusDescription(),
        response.getDeserializedHeaders().isServerEncrypted(),
        null, null, null, null, null,
        response.getDeserializedHeaders().getEncryptionKeySha256(), null,
        response.getDeserializedHeaders().getMetadata(),
        response.getDeserializedHeaders().getBlobCommittedBlockCount());
    // Carry the original request/status/headers through unchanged.
    return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
        properties);
}
class BlobAsyncClientBase {
    // Default chunk size used when splitting a download into parallel blocks.
    private static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB;
    // Maximum allowed size of a single download block.
    private static final int BLOB_MAX_DOWNLOAD_BLOCK_SIZE = 100 * Constants.MB;

    private final ClientLogger logger = new ClientLogger(BlobAsyncClientBase.class);

    // Generated low-level client that performs the actual REST calls.
    protected final AzureBlobStorageImpl azureBlobStorage;
    // Snapshot identifier; null when this client targets the base blob.
    private final String snapshot;
    // Customer-provided encryption key; null when service-managed encryption is used.
    private final CpkInfo customerProvidedKey;
    protected final String accountName;
    protected final String containerName;
    protected final String blobName;
    protected final BlobServiceVersion serviceVersion;

    /**
     * Package-private constructor for use by {@link SpecializedBlobClientBuilder}.
     *
     * @param pipeline The pipeline used to send and receive service requests.
     * @param url The endpoint where to send service requests.
     * @param serviceVersion The version of the service to receive requests.
     * @param accountName The storage account name.
     * @param containerName The container name.
     * @param blobName The blob name.
     * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
     * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     */
    protected BlobAsyncClientBase(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion,
        String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey) {
        this.azureBlobStorage = new AzureBlobStorageBuilder()
            .pipeline(pipeline)
            .url(url)
            .version(serviceVersion.getVersion())
            .build();
        this.serviceVersion = serviceVersion;

        this.accountName = accountName;
        this.containerName = containerName;
        // Decode-then-encode normalizes the name whether the caller passed it encoded or not.
        this.blobName = Utility.urlEncode(Utility.urlDecode(blobName));
        this.snapshot = snapshot;
        this.customerProvidedKey = customerProvidedKey;
    }

    /**
     * Creates a new {@link BlobAsyncClientBase} linked to the {@code snapshot} of this blob resource.
     *
     * @param snapshot the identifier for a specific snapshot of this blob
     * @return a {@link BlobAsyncClientBase} used to interact with the specific snapshot.
     */
    public BlobAsyncClientBase getSnapshotClient(String snapshot) {
        return new BlobAsyncClientBase(getHttpPipeline(), getBlobUrl(), getServiceVersion(), getAccountName(),
            getContainerName(), getBlobName(), snapshot, getCustomerProvidedKey());
    }

    /**
     * Gets the URL of the blob represented by this client.
     *
     * @return the URL.
     */
    public String getBlobUrl() {
        if (!this.isSnapshot()) {
            return azureBlobStorage.getUrl();
        } else {
            // Append the snapshot query parameter, using '&' when the URL already has a query string.
            if (azureBlobStorage.getUrl().contains("?")) {
                return String.format("%s&snapshot=%s", azureBlobStorage.getUrl(), snapshot);
            } else {
                return String.format("%s?snapshot=%s", azureBlobStorage.getUrl(), snapshot);
            }
        }
    }

    /**
     * Get the container name.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getContainerName}
     *
     * @return The name of the container.
     */
    public final String getContainerName() {
        return containerName;
    }

    /**
     * Decodes and gets the blob name.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getBlobName}
     *
     * @return The decoded name of the blob.
     */
    public final String getBlobName() {
        return (blobName == null) ? null : Utility.urlDecode(blobName);
    }

    /**
     * Gets the {@link HttpPipeline} powering this client.
     *
     * @return The pipeline.
     */
    public HttpPipeline getHttpPipeline() {
        return azureBlobStorage.getHttpPipeline();
    }

    /**
     * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
     *
     * @return the customer provided key used for encryption.
     */
    public CpkInfo getCustomerProvidedKey() {
        return customerProvidedKey;
    }

    /**
     * Get associated account name.
     *
     * @return account name associated with this storage resource.
     */
    public String getAccountName() {
        return accountName;
    }

    /**
     * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    return serviceVersion;
}

/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    return this.snapshot;
}

/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    return this.snapshot != null;
}

/**
 * Determines if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
public Mono<Boolean> exists() {
    try {
        return existsWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Determines if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.existsWithResponse}
 *
 * @return true if the blob exists, false if it doesn't
 */
public Mono<Response<Boolean>> existsWithResponse() {
    try {
        return withContext(this::existsWithResponse);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Context-accepting implementation: a successful properties call means the blob exists;
// a 404 from the service is translated into a successful response carrying 'false'.
Mono<Response<Boolean>> existsWithResponse(Context context) {
    return this.getPropertiesWithResponse(null, context)
        .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true))
        .onErrorResume(t -> t instanceof BlobStorageException && ((BlobStorageException) t).getStatusCode() == 404,
            t -> {
                HttpResponse response = ((BlobStorageException) t).getResponse();
                return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                    response.getHeaders(), false));
            });
}

/**
 * Copies the data at the source URL to a blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs (Copy Blob REST API).</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link PollerFlux} that polls the blob copy operation until it has completed, has failed, or has been
 * cancelled.
 */
public PollerFlux<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}

/**
 * Copies the data at the source URL to a blob.
 *
 * <p><strong>Starting a copy operation</strong></p>
 * Starting a copy operation and polling on the responses.
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopy}
 *
 * <p><strong>Cancelling a copy operation</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopyFromUrlCancel}
 *
 * <p>For more information, see the Azure Docs (Copy Blob REST API).</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedAccessConditions {@link RequestConditions} against the source. Standard HTTP
 * Access conditions related to the modification of data. ETag and LastModifiedTime are used to construct
 * conditions related to when the blob was changed relative to the given request. The request will fail if the
 * specified condition is not satisfied.
 * @param destAccessConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link PollerFlux} that polls the blob copy operation until it has completed, has failed, or has been
 * cancelled.
 */
public PollerFlux<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedAccessConditions,
    BlobRequestConditions destAccessConditions, Duration pollInterval) {

    final Duration interval = pollInterval != null ? pollInterval : Duration.ofSeconds(1);
    // Substitute empty condition objects so downstream code never has to null-check.
    final RequestConditions sourceModifiedCondition = sourceModifiedAccessConditions == null
        ? new RequestConditions()
        : sourceModifiedAccessConditions;
    final BlobRequestConditions destinationAccessConditions = destAccessConditions == null
        ? new BlobRequestConditions()
        : destAccessConditions;

    // Defensive copy of the source conditions so caller mutations cannot affect in-flight polling.
    final RequestConditions sourceConditions = new RequestConditions()
        .setIfModifiedSince(sourceModifiedCondition.getIfModifiedSince())
        .setIfUnmodifiedSince(sourceModifiedCondition.getIfUnmodifiedSince())
        .setIfMatch(sourceModifiedCondition.getIfMatch())
        .setIfNoneMatch(sourceModifiedCondition.getIfNoneMatch());

    return new PollerFlux<>(interval,
        // Activation: issue Start Copy From URL.
        (pollingContext) -> {
            try {
                return onStart(sourceUrl, metadata, tier, priority, sourceConditions, destinationAccessConditions);
            } catch (RuntimeException ex) {
                return monoError(logger, ex);
            }
        },
        // Poll: check copy status via blob properties.
        (pollingContext) -> {
            try {
                return onPoll(pollingContext.getLatestResponse());
            } catch (RuntimeException ex) {
                return monoError(logger, ex);
            }
        },
        // Cancel: abort the copy using the id captured at activation.
        (pollingContext, firstResponse) -> {
            if (firstResponse == null || firstResponse.getValue() == null) {
                return Mono.error(logger.logExceptionAsError(
                    new IllegalArgumentException("Cannot cancel a poll response that never started.")));
            }
            final String copyIdentifier = firstResponse.getValue().getCopyId();
            if (!ImplUtils.isNullOrEmpty(copyIdentifier)) {
                logger.info("Cancelling copy operation for copy id: {}", copyIdentifier);
                return abortCopyFromUrl(copyIdentifier).thenReturn(firstResponse.getValue());
            }
            return Mono.empty();
        },
        // Fetch-result: copy has no separate final result.
        (pollingContext) -> Mono.empty());
}

// Activation step of beginCopy: validates the source URL and issues Start Copy From URL.
private Mono<BlobCopyInfo> onStart(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedAccessConditions,
    BlobRequestConditions destinationAccessConditions) {
    URL url;
    try {
        url = new URL(sourceUrl);
    } catch (MalformedURLException ex) {
        throw logger.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url.", ex));
    }

    return withContext(
        context -> azureBlobStorage.blobs().startCopyFromURLWithRestResponseAsync(null, null, url, null, metadata,
            tier, priority, sourceModifiedAccessConditions.getIfModifiedSince(),
            sourceModifiedAccessConditions.getIfUnmodifiedSince(), sourceModifiedAccessConditions.getIfMatch(),
            sourceModifiedAccessConditions.getIfNoneMatch(), destinationAccessConditions.getIfModifiedSince(),
            destinationAccessConditions.getIfUnmodifiedSince(), destinationAccessConditions.getIfMatch(),
            destinationAccessConditions.getIfNoneMatch(), destinationAccessConditions.getLeaseId(), null, context))
        .map(response -> {
            final BlobStartCopyFromURLHeaders headers = response.getDeserializedHeaders();
            return new BlobCopyInfo(sourceUrl, headers.getCopyId(), headers.getCopyStatus(),
                headers.getETag(), headers.getLastModified(), headers.getErrorCode());
        });
}

// Poll step of beginCopy: maps the blob's CopyStatusType to a long-running-operation status.
private Mono<PollResponse<BlobCopyInfo>> onPoll(PollResponse<BlobCopyInfo> pollResponse) {
    // Terminal states: nothing more to do.
    if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
        || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) {
        return Mono.just(pollResponse);
    }

    final BlobCopyInfo lastInfo = pollResponse.getValue();
    if (lastInfo == null) {
        logger.warning("BlobCopyInfo does not exist. Activation operation failed.");
        return Mono.just(new PollResponse<>(
            LongRunningOperationStatus.fromString("COPY_START_FAILED", true), null));
    }

    return getProperties().map(response -> {
        final CopyStatusType status = response.getCopyStatus();
        final BlobCopyInfo result = new BlobCopyInfo(response.getCopySource(), response.getCopyId(), status,
            response.getETag(), response.getCopyCompletionTime(), response.getCopyStatusDescription());

        LongRunningOperationStatus operationStatus;
        switch (status) {
            case SUCCESS:
                operationStatus = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case FAILED:
                operationStatus = LongRunningOperationStatus.FAILED;
                break;
            case ABORTED:
                operationStatus = LongRunningOperationStatus.USER_CANCELLED;
                break;
            case PENDING:
                operationStatus = LongRunningOperationStatus.IN_PROGRESS;
                break;
            default:
                throw logger.logExceptionAsError(new IllegalArgumentException(
                    "CopyStatusType is not supported. Status: " + status));
        }

        return new PollResponse<>(operationStatus, result);
    }).onErrorReturn(
        // A transient failure fetching properties should not kill the poller; retry on next interval.
        new PollResponse<>(LongRunningOperationStatus.fromString("POLLING_FAILED", true), lastInfo));
}

/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the Azure Docs (Abort Copy Blob REST API).</p>
 *
 * @param copyId The id of the copy operation to abort.
 * @return A reactive response signalling completion.
 */
public Mono<Void> abortCopyFromUrl(String copyId) {
    try {
        return abortCopyFromUrlWithResponse(copyId, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.abortCopyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @see * @see * @see * @param copyId The id of the copy operation to abort. * @param leaseId The lease ID the active lease on the blob must match. * @return A reactive response signalling completion. */ public Mono<Response<Void>> abortCopyFromUrlWithResponse(String copyId, String leaseId) { try { return withContext(context -> abortCopyFromUrlWithResponse(copyId, leaseId, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> abortCopyFromUrlWithResponse(String copyId, String leaseId, Context context) { return this.azureBlobStorage.blobs().abortCopyFromURLWithRestResponseAsync( null, null, copyId, null, leaseId, null, context) .map(response -> new SimpleResponse<>(response, null)); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.copyFromUrl * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. * @return A reactive response containing the copy ID for the long running operation. */ public Mono<String> copyFromUrl(String copySource) { try { return copyFromUrlWithResponse(copySource, null, null, null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.copyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. 
URLs outside of Azure may only be copied to block blobs. * @param metadata Metadata to associate with the destination blob. * @param tier {@link AccessTier} for the destination blob. * @param sourceModifiedAccessConditions {@link RequestConditions} against the source. Standard HTTP Access * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. The request will fail if the specified * condition is not satisfied. * @param destAccessConditions {@link BlobRequestConditions} against the destination. * @return A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier, RequestConditions sourceModifiedAccessConditions, BlobRequestConditions destAccessConditions) { try { return withContext(context -> copyFromUrlWithResponse(copySource, metadata, tier, sourceModifiedAccessConditions, destAccessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<String>> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier, RequestConditions sourceModifiedAccessConditions, BlobRequestConditions destAccessConditions, Context context) { sourceModifiedAccessConditions = sourceModifiedAccessConditions == null ? new RequestConditions() : sourceModifiedAccessConditions; destAccessConditions = destAccessConditions == null ? 
new BlobRequestConditions() : destAccessConditions; URL url; try { url = new URL(copySource); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalArgumentException("'copySource' is not a valid url.")); } return this.azureBlobStorage.blobs().copyFromURLWithRestResponseAsync( null, null, url, null, metadata, tier, sourceModifiedAccessConditions.getIfModifiedSince(), sourceModifiedAccessConditions.getIfUnmodifiedSince(), sourceModifiedAccessConditions.getIfMatch(), sourceModifiedAccessConditions.getIfNoneMatch(), destAccessConditions.getIfModifiedSince(), destAccessConditions.getIfUnmodifiedSince(), destAccessConditions.getIfMatch(), destAccessConditions.getIfNoneMatch(), destAccessConditions.getLeaseId(), null, context) .map(rb -> new SimpleResponse<>(rb, rb.getDeserializedHeaders().getCopyId())); } /** * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or * {@link AppendBlobClient}. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.download} * * <p>For more information, see the * <a href="https: * * @return A reactive response containing the blob data. */ public Flux<ByteBuffer> download() { try { return downloadWithResponse(null, null, null, false) .flatMapMany(BlobDownloadAsyncResponse::getValue); } catch (RuntimeException ex) { return fluxError(logger, ex); } } /** * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link * PageBlobClient}, or {@link AppendBlobClient}. 
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadWithResponse}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST API).</p>
 *
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param accessConditions {@link BlobRequestConditions}
 * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned.
 * @return A reactive response containing the blob data.
 */
public Mono<BlobDownloadAsyncResponse> downloadWithResponse(BlobRange range, DownloadRetryOptions options,
    BlobRequestConditions accessConditions, boolean rangeGetContentMD5) {
    try {
        return withContext(context -> downloadWithResponse(range, options, accessConditions, rangeGetContentMD5,
            context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Context-accepting implementation: wraps the reliable-download stream in the public response type.
Mono<BlobDownloadAsyncResponse> downloadWithResponse(BlobRange range, DownloadRetryOptions options,
    BlobRequestConditions accessConditions, boolean rangeGetContentMD5, Context context) {
    return downloadHelper(range, options, accessConditions, rangeGetContentMD5, context)
        .map(response -> new BlobDownloadAsyncResponse(response.getRequest(), response.getStatusCode(),
            response.getHeaders(), response.getValue(), response.getDeserializedHeaders()));
}

// Issues the download call and wraps it in a ReliableDownload that can resume from the last
// received offset, locked to the etag captured from the first response.
private Mono<ReliableDownload> downloadHelper(BlobRange range, DownloadRetryOptions options,
    BlobRequestConditions accessConditions, boolean rangeGetContentMd5, Context context) {
    range = range == null ? new BlobRange(0) : range;
    // The service parameter is tri-state: 'true' requests the MD5, null omits the header entirely.
    Boolean getMD5 = rangeGetContentMd5 ? rangeGetContentMd5 : null;
    accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions;
    HttpGetterInfo info = new HttpGetterInfo()
        .setOffset(range.getOffset())
        .setCount(range.getCount())
        .setETag(accessConditions.getIfMatch());

    return azureBlobStorage.blobs().downloadWithRestResponseAsync(null, null, snapshot, null, range.toHeaderValue(),
        accessConditions.getLeaseId(), getMD5, null, accessConditions.getIfModifiedSince(),
        accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
        accessConditions.getIfNoneMatch(), null, customerProvidedKey, context)
        .map(response -> {
            // Pin the etag from the first response so every resumed request reads the same blob version.
            info.setETag(response.getDeserializedHeaders().getETag());
            return new ReliableDownload(response, options, info, updatedInfo ->
                downloadHelper(new BlobRange(updatedInfo.getOffset(), updatedInfo.getCount()), options,
                    new BlobRequestConditions().setIfMatch(info.getETag()), false, context));
        });
}

/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p>Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link
 * AppendBlobClient}.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadToFile}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST API).</p>
 *
 * @param filePath A non-null path to the file where the downloaded data will be written.
 * @return A reactive response containing the blob properties and metadata.
 */
public Mono<BlobProperties> downloadToFile(String filePath) {
    try {
        return downloadToFileWithResponse(filePath, null, null, null, null, false).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p>Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link
 * AppendBlobClient}.</p>
 *
 * <p>This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra
 * call, provide the {@link BlobRange} parameter.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST API).</p>
 *
 * @param filePath A non-null path to the file where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @return A reactive response containing the blob properties and metadata.
 * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Mono<Response<BlobProperties>> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions options,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5) {
    try {
        return withContext(context -> downloadToFileWithResponse(filePath, range, parallelTransferOptions, options,
            requestConditions, rangeGetContentMd5, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Context-accepting implementation: opens the destination channel, delegates to the chunked
// download, and guarantees cleanup (channel close, and file deletion on failure) on any signal.
Mono<Response<BlobProperties>> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) {
    BlobRange rangeReal = range == null ? new BlobRange(0) : range;
    final ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.populateAndApplyDefaults(parallelTransferOptions);
    BlobRequestConditions conditionsReal = requestConditions == null
        ? new BlobRequestConditions() : requestConditions;
    AsynchronousFileChannel channel = downloadToFileResourceSupplier(filePath);
    return Mono.just(channel)
        .flatMap(c -> this.downloadToFileImpl(c, rangeReal, finalParallelTransferOptions, downloadRetryOptions,
            conditionsReal, rangeGetContentMd5, context))
        .doFinally(signalType -> this.downloadToFileCleanup(channel, filePath, signalType));
}

// Opens the destination file; CREATE_NEW enforces the "must not already exist" contract.
private AsynchronousFileChannel downloadToFileResourceSupplier(String filePath) {
    try {
        return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ,
            StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
    } catch (IOException e) {
        throw logger.logExceptionAsError(new UncheckedIOException(e));
    }
}

// Downloads the blob in parallel chunks into the open file channel. The first chunk's response
// (from the setup call) is reused so its bytes are not fetched twice.
private Mono<Response<BlobProperties>> downloadToFileImpl(AsynchronousFileChannel file, BlobRange rangeReal,
    ParallelTransferOptions finalParallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) {
    // Shared across chunk writers to serialize progress updates.
    Lock progressLock = new ReentrantLock();
    AtomicLong totalProgress = new AtomicLong(0);

    /*
     * Downloads the first chunk and gets the size of the data and etag if not specified by the user.
     */
    return getSetupMono(rangeReal, finalParallelTransferOptions, downloadRetryOptions, requestConditions,
        rangeGetContentMd5, context)
        .flatMap(setupTuple3 -> {
            long newCount = setupTuple3.getT1();
            BlobRequestConditions realConditions = setupTuple3.getT2();

            int numChunks = calculateNumBlocks(newCount, finalParallelTransferOptions.getBlockSize());

            // An empty blob still needs one pass so the initial response is written (zero bytes) and returned.
            numChunks = numChunks == 0 ? 1 : numChunks;

            BlobDownloadAsyncResponse initialResponse = setupTuple3.getT3();
            return Flux.range(0, numChunks)
                .flatMap(chunkNum -> {
                    // Chunk 0 was already fetched by the setup call; just write its body.
                    if (chunkNum == 0) {
                        return writeBodyToFile(initialResponse, file, 0, finalParallelTransferOptions,
                            progressLock, totalProgress);
                    }

                    // The last chunk may be shorter than the block size.
                    long chunkSizeActual = Math.min(finalParallelTransferOptions.getBlockSize(),
                        newCount - (chunkNum * finalParallelTransferOptions.getBlockSize()));
                    BlobRange chunkRange = new BlobRange(
                        rangeReal.getOffset() + (chunkNum * finalParallelTransferOptions.getBlockSize()),
                        chunkSizeActual);

                    return this.downloadWithResponse(chunkRange, downloadRetryOptions, realConditions,
                        rangeGetContentMd5, null)
                        .subscribeOn(Schedulers.elastic())
                        .flatMap(response -> writeBodyToFile(response, file, chunkNum,
                            finalParallelTransferOptions, progressLock, totalProgress));
                })
                .then(Mono.just(buildBlobPropertiesResponse(initialResponse)));
        });
}

// Ceiling division of the data size by the block length.
private int calculateNumBlocks(long dataSize, long blockLength) {
    int numBlocks = toIntExact(dataSize / blockLength);
    if (dataSize % blockLength != 0) {
        numBlocks++;
    }
    return numBlocks;
}

/*
Download the first chunk. Construct a Mono which will emit the total count for calculating the number of chunks,
access conditions containing the etag to lock on, and the response from downloading the first chunk.
*/
private Mono<Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse>> getSetupMono(BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) {
    // The first request is capped at one block so the setup call doubles as the first chunk download.
    long initialChunkSize = range.getCount() != null && range.getCount() < parallelTransferOptions.getBlockSize() ?
range.getCount() : parallelTransferOptions.getBlockSize(); return this.downloadWithResponse(new BlobRange(range.getOffset(), initialChunkSize), downloadRetryOptions, requestConditions, rangeGetContentMd5, context) .subscribeOn(Schedulers.elastic()) .flatMap(response -> { /* Either the etag was set and it matches because the download succeeded, so this is a no-op, or there was no etag, so we set it here. ETag locking is vital to ensure we download one, consistent view of the file. */ BlobRequestConditions newConditions = setEtag(requestConditions, response.getDeserializedHeaders().getETag()); long totalLength = extractTotalBlobLength(response.getDeserializedHeaders().getContentRange()); /* If the user either didn't specify a count or they specified a count greater than the size of the remaining data, take the size of the remaining data. This is to prevent the case where the count is much much larger than the size of the blob and we could try to download at an invalid offset. */ long newCount = range.getCount() == null || range.getCount() > (totalLength - range.getOffset()) ? totalLength - range.getOffset() : range.getCount(); return Mono.zip(Mono.just(newCount), Mono.just(newConditions), Mono.just(response)); }) .onErrorResume(BlobStorageException.class, blobStorageException -> { /* In the case of an empty blob, we still want to report success and give back valid headers. Attempting a range download on an empty blob will return an InvalidRange error code and a Content-Range header of the format "bytes * /0". We need to double check that the total size is zero in the case that the customer has attempted an invalid range on a non-zero length blob. 
*/ if (blobStorageException.getErrorCode() == BlobErrorCode.INVALID_RANGE && extractTotalBlobLength(blobStorageException.getResponse() .getHeaders().getValue("Content-Range")) == 0) { return this.downloadWithResponse(new BlobRange(0, 0L), downloadRetryOptions, requestConditions, rangeGetContentMd5, context) .subscribeOn(Schedulers.elastic()) .flatMap(response -> { /* Ensure the blob is still 0 length by checking our download was the full length. (200 is for full blob; 206 is partial). */ if (response.getStatusCode() != 200) { Mono.error(new IllegalStateException("Blob was modified mid download. It was " + "originally 0 bytes and is now larger.")); } return Mono.zip(Mono.just(0L), Mono.just(requestConditions), Mono.just(response)); }); } return Mono.error(blobStorageException); }); } private static BlobRequestConditions setEtag(BlobRequestConditions accessConditions, String etag) { return new BlobRequestConditions() .setIfModifiedSince( accessConditions.getIfModifiedSince()) .setIfUnmodifiedSince( accessConditions.getIfModifiedSince()) .setIfMatch(etag) .setIfNoneMatch( accessConditions.getIfNoneMatch()) .setLeaseId(accessConditions.getLeaseId()); } private static Mono<Void> writeBodyToFile(BlobDownloadAsyncResponse response, AsynchronousFileChannel file, long chunkNum, ParallelTransferOptions optionsReal, Lock progressLock, AtomicLong totalProgress) { Flux<ByteBuffer> data = response.getValue(); data = ProgressReporter.addParallelProgressReporting(data, optionsReal.getProgressReceiver(), progressLock, totalProgress); return FluxUtil.writeFile(data, file, chunkNum * optionsReal.getBlockSize()); } private static long extractTotalBlobLength(String contentRange) { return Long.parseLong(contentRange.split("/")[1]); } private void downloadToFileCleanup(AsynchronousFileChannel channel, String filePath, SignalType signalType) { try { channel.close(); if (!signalType.equals(SignalType.ON_COMPLETE)) { Files.delete(Paths.get(filePath)); logger.verbose("Downloading to file 
failed. Cleaning up resources."); } } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.delete} * * <p>For more information, see the * <a href="https: * * @return A reactive response signalling completion. */ public Mono<Void> delete() { try { return deleteWithResponse(null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.deleteWithResponse * * <p>For more information, see the * <a href="https: * * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include} * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being * deleted, you must pass null. * @param accessConditions {@link BlobRequestConditions} * @return A reactive response signalling completion. */ public Mono<Response<Void>> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobRequestConditions accessConditions) { try { return withContext(context -> deleteWithResponse(deleteBlobSnapshotOptions, accessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobRequestConditions accessConditions, Context context) { accessConditions = accessConditions == null ? 
new BlobRequestConditions() : accessConditions; return this.azureBlobStorage.blobs().deleteWithRestResponseAsync(null, null, snapshot, null, accessConditions.getLeaseId(), deleteBlobSnapshotOptions, accessConditions.getIfModifiedSince(), accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(), null, context) .map(response -> new SimpleResponse<>(response, null)); } /** * Returns the blob's metadata and properties. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getProperties} * * <p>For more information, see the * <a href="https: * * @return A reactive response containing the blob properties and metadata. */ public Mono<BlobProperties> getProperties() { try { return getPropertiesWithResponse(null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Returns the blob's metadata and properties. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param accessConditions {@link BlobRequestConditions} * @return A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobRequestConditions accessConditions) { try { return withContext(context -> getPropertiesWithResponse(accessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobRequestConditions accessConditions, Context context) { accessConditions = accessConditions == null ? 
new BlobRequestConditions() : accessConditions; return this.azureBlobStorage.blobs().getPropertiesWithRestResponseAsync( null, null, snapshot, null, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(), accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(), null, customerProvidedKey, context) .map(rb -> { BlobGetPropertiesHeaders hd = rb.getDeserializedHeaders(); BlobProperties properties = new BlobProperties(hd.getCreationTime(), hd.getLastModified(), hd.getETag(), hd.getContentLength() == null ? 0 : hd.getContentLength(), hd.getContentType(), hd.getContentMD5(), hd.getContentEncoding(), hd.getContentDisposition(), hd.getContentLanguage(), hd.getCacheControl(), hd.getBlobSequenceNumber(), hd.getBlobType(), hd.getLeaseStatus(), hd.getLeaseState(), hd.getLeaseDuration(), hd.getCopyId(), hd.getCopyStatus(), hd.getCopySource(), hd.getCopyProgress(), hd.getCopyCompletionTime(), hd.getCopyStatusDescription(), hd.isServerEncrypted(), hd.isIncrementalCopy(), hd.getDestinationSnapshot(), AccessTier.fromString(hd.getAccessTier()), hd.isAccessTierInferred(), ArchiveStatus.fromString(hd.getArchiveStatus()), hd.getEncryptionKeySha256(), hd.getAccessTierChangeTime(), hd.getMetadata(), hd.getBlobCommittedBlockCount()); return new SimpleResponse<>(rb, properties); }); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In * order to preserve existing values, they must be passed alongside the header being changed. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param headers {@link BlobHttpHeaders} * @return A reactive response signalling completion. 
*/
public Mono<Void> setHttpHeaders(BlobHttpHeaders headers) {
    try {
        // Delegate to the with-response overload and unwrap the response envelope.
        return setHttpHeadersWithResponse(headers, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setHttpHeadersWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param headers {@link BlobHttpHeaders}
 * @param accessConditions {@link BlobRequestConditions}
 * @return A reactive response signalling completion.
 */
public Mono<Response<Void>> setHttpHeadersWithResponse(BlobHttpHeaders headers,
    BlobRequestConditions accessConditions) {
    try {
        return withContext(context -> setHttpHeadersWithResponse(headers, accessConditions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<Void>> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions accessConditions,
    Context context) {
    // Default to empty conditions so the accessor calls below are null-safe.
    accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions;
    return this.azureBlobStorage.blobs().setHTTPHeadersWithRestResponseAsync(
        null, null, null, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(),
        accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
        accessConditions.getIfNoneMatch(), null, headers, context)
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setMetadata
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Metadata to associate with the blob.
 * @return A reactive response signalling completion.
 */
public Mono<Void> setMetadata(Map<String, String> metadata) {
    try {
        return setMetadataWithResponse(metadata, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setMetadataWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Metadata to associate with the blob.
 * @param accessConditions {@link BlobRequestConditions}
 * @return A reactive response signalling completion.
 */
public Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata,
    BlobRequestConditions accessConditions) {
    try {
        return withContext(context -> setMetadataWithResponse(metadata, accessConditions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions accessConditions,
    Context context) {
    // Default to empty conditions so the accessor calls below are null-safe.
    accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions;
    return this.azureBlobStorage.blobs().setMetadataWithRestResponseAsync(
        null, null, null, metadata, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(),
        accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
        accessConditions.getIfNoneMatch(), null, customerProvidedKey, context)
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.createSnapshot}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return A response containing a {@link BlobAsyncClientBase} which is used to interact with the created snapshot,
 * use {@link
 */
public Mono<BlobAsyncClientBase> createSnapshot() {
    try {
        return createSnapshotWithResponse(null, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.createSnapshotWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Metadata to associate with the blob snapshot.
 * @param accessConditions {@link BlobRequestConditions}
 * @return A response containing a {@link BlobAsyncClientBase} which is used to interact with the created snapshot,
 * use {@link
 */
public Mono<Response<BlobAsyncClientBase>> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions accessConditions) {
    try {
        return withContext(context -> createSnapshotWithResponse(metadata, accessConditions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<BlobAsyncClientBase>> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions accessConditions, Context context) {
    // Default to empty conditions so the accessor calls below are null-safe.
    accessConditions = accessConditions == null ?
new BlobRequestConditions() : accessConditions;
    return this.azureBlobStorage.blobs().createSnapshotWithRestResponseAsync(
        null, null, null, metadata, accessConditions.getIfModifiedSince(),
        accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
        accessConditions.getIfNoneMatch(), accessConditions.getLeaseId(), null, customerProvidedKey, context)
        // Wrap the returned snapshot id in a client scoped to that snapshot.
        .map(rb -> new SimpleResponse<>(rb, this.getSnapshotClient(rb.getDeserializedHeaders().getSnapshot())));
}

/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setAccessTier
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param tier The new tier for the blob.
 * @return A reactive response signalling completion.
 * @throws NullPointerException if {@code tier} is null.
 */
public Mono<Void> setAccessTier(AccessTier tier) {
    try {
        return setAccessTierWithResponse(tier, null, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setAccessTierWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @return A reactive response signalling completion.
 * @throws NullPointerException if {@code tier} is null.
 */
public Mono<Response<Void>> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId) {
    try {
        return withContext(context -> setTierWithResponse(tier, priority, leaseId, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<Void>> setTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Context context) {
    // The service requires a tier; fail fast with a clear message.
    StorageImplUtils.assertNotNull("tier", tier);
    return this.azureBlobStorage.blobs().setTierWithRestResponseAsync(
        null, null, tier, null, priority, null, leaseId, context)
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.undelete}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return A reactive response signalling completion.
 */
public Mono<Void> undelete() {
    try {
        return undeleteWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.undeleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return A reactive response signalling completion.
 */
public Mono<Response<Void>> undeleteWithResponse() {
    try {
        return withContext(this::undeleteWithResponse);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<Void>> undeleteWithResponse(Context context) {
    return this.azureBlobStorage.blobs().undeleteWithRestResponseAsync(null, null, context).map(response ->
        new SimpleResponse<>(response, null));
}

/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getAccountInfo}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return a reactor response containing the sku name and account kind.
 */
public Mono<StorageAccountInfo> getAccountInfo() {
    try {
        return getAccountInfoWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getAccountInfoWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return a reactor response containing the sku name and account kind.
 */
public Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse() {
    try {
        return withContext(this::getAccountInfoWithResponse);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse(Context context) {
    return this.azureBlobStorage.blobs().getAccountInfoWithRestResponseAsync(null, null, context)
        .map(rb -> {
            // Only the sku name and account kind headers are surfaced to callers.
            BlobGetAccountInfoHeaders hd = rb.getDeserializedHeaders();
            return new SimpleResponse<>(rb, new StorageAccountInfo(hd.getSkuName(), hd.getAccountKind()));
        });
}
}
class BlobAsyncClientBase {
    // Default and maximum chunk sizes used by the parallel download-to-file path.
    private static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB;
    private static final int BLOB_MAX_DOWNLOAD_BLOCK_SIZE = 100 * Constants.MB;
    private final ClientLogger logger = new ClientLogger(BlobAsyncClientBase.class);
    // Generated protocol-layer client that performs the actual REST calls.
    protected final AzureBlobStorageImpl azureBlobStorage;
    // Snapshot identifier this client is scoped to; null when targeting the base blob.
    private final String snapshot;
    // Customer-provided encryption key; null when the service manages encryption.
    private final CpkInfo customerProvidedKey;
    protected final String accountName;
    protected final String containerName;
    protected final String blobName;
    protected final BlobServiceVersion serviceVersion;

    /**
     * Package-private constructor for use by {@link SpecializedBlobClientBuilder}.
     *
     * @param pipeline The pipeline used to send and receive service requests.
     * @param url The endpoint where to send service requests.
     * @param serviceVersion The version of the service to receive requests.
     * @param accountName The storage account name.
     * @param containerName The container name.
     * @param blobName The blob name.
     * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
     * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     */
    protected BlobAsyncClientBase(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion,
        String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey) {
        this.azureBlobStorage = new AzureBlobStorageBuilder()
            .pipeline(pipeline)
            .url(url)
            .version(serviceVersion.getVersion())
            .build();
        this.serviceVersion = serviceVersion;
        this.accountName = accountName;
        this.containerName = containerName;
        // Decode-then-encode normalizes the name whether the caller passed it encoded or not.
        this.blobName = Utility.urlEncode(Utility.urlDecode(blobName));
        this.snapshot = snapshot;
        this.customerProvidedKey = customerProvidedKey;
    }

    /**
     * Creates a new {@link BlobAsyncClientBase} linked to the {@code snapshot} of this blob resource.
     *
     * @param snapshot the identifier for a specific snapshot of this blob
     * @return a {@link BlobAsyncClientBase} used to interact with the specific snapshot.
     */
    public BlobAsyncClientBase getSnapshotClient(String snapshot) {
        return new BlobAsyncClientBase(getHttpPipeline(), getBlobUrl(), getServiceVersion(), getAccountName(),
            getContainerName(), getBlobName(), snapshot, getCustomerProvidedKey());
    }

    /**
     * Gets the URL of the blob represented by this client.
     *
     * @return the URL.
     */
    public String getBlobUrl() {
        if (!this.isSnapshot()) {
            return azureBlobStorage.getUrl();
        } else {
            // Append the snapshot query parameter, respecting any existing query string.
            if (azureBlobStorage.getUrl().contains("?")) {
                return String.format("%s&snapshot=%s", azureBlobStorage.getUrl(), snapshot);
            } else {
                return String.format("%s?snapshot=%s", azureBlobStorage.getUrl(), snapshot);
            }
        }
    }

    /**
     * Get the container name.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getContainerName}
     *
     * @return The name of the container.
     */
    public final String getContainerName() {
        return containerName;
    }

    /**
     * Decodes and gets the blob name.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getBlobName}
     *
     * @return The decoded name of the blob.
     */
    public final String getBlobName() {
        return (blobName == null) ? null : Utility.urlDecode(blobName);
    }

    /**
     * Gets the {@link HttpPipeline} powering this client.
     *
     * @return The pipeline.
     */
    public HttpPipeline getHttpPipeline() {
        return azureBlobStorage.getHttpPipeline();
    }

    /**
     * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
     *
     * @return the customer provided key used for encryption.
     */
    public CpkInfo getCustomerProvidedKey() {
        return customerProvidedKey;
    }

    /**
     * Get associated account name.
     *
     * @return account name associated with this storage resource.
     */
    public String getAccountName() {
        return accountName;
    }

    /**
     * Gets the service version the client is using.
     *
     * @return the service version the client is using.
     */
    public BlobServiceVersion getServiceVersion() {
        return serviceVersion;
    }

    /**
     * Gets the snapshotId for a blob resource
     *
     * @return A string that represents the snapshotId of the snapshot blob
     */
    public String getSnapshotId() {
        return this.snapshot;
    }

    /**
     * Determines if a blob is a snapshot
     *
     * @return A boolean that indicates if a blob is a snapshot
     */
    public boolean isSnapshot() {
        return this.snapshot != null;
    }

    /**
     * Determines if the blob this client represents exists in the cloud.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.exists}
     *
     * @return true if the blob exists, false if it doesn't
     */
    public Mono<Boolean> exists() {
        try {
            return existsWithResponse().flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Determines if the blob this client represents exists in the cloud.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.existsWithResponse}
     *
     * @return true if the blob exists, false if it doesn't
     */
    public Mono<Response<Boolean>> existsWithResponse() {
        try {
            return withContext(this::existsWithResponse);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    Mono<Response<Boolean>> existsWithResponse(Context context) {
        // A successful properties call means the blob exists; a 404 maps to an explicit false.
        return this.getPropertiesWithResponse(null, context)
            .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true))
            .onErrorResume(t -> t instanceof BlobStorageException
                && ((BlobStorageException) t).getStatusCode() == 404, t -> {
                    HttpResponse response = ((BlobStorageException) t).getResponse();
                    return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                        response.getHeaders(), false));
                });
    }

    /**
     * Copies the data at the source URL to a blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopy
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link PollerFlux} that polls the blob copy operation until it has completed, has failed, or has been
 * cancelled.
 */
public PollerFlux<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}

/**
 * Copies the data at the source URL to a blob.
 *
 * <p><strong>Starting a copy operation</strong></p>
 * Starting a copy operation and polling on the responses.
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopy
 *
 * <p><strong>Cancelling a copy operation</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopyFromUrlCancel
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedAccessConditions {@link RequestConditions} against the source. Standard HTTP
 * Access conditions related to the modification of data. ETag and LastModifiedTime are used to construct
 * conditions related to when the blob was changed relative to the given request. The request will fail if the
 * specified condition is not satisfied.
 * @param destAccessConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link PollerFlux} that polls the blob copy operation until it has completed, has failed, or has been
 * cancelled.
 */
public PollerFlux<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedAccessConditions,
    BlobRequestConditions destAccessConditions, Duration pollInterval) {
    final Duration interval = pollInterval != null ? pollInterval : Duration.ofSeconds(1);
    // Default both condition sets so accessor calls below are null-safe.
    final RequestConditions sourceModifiedCondition = sourceModifiedAccessConditions == null
        ? new RequestConditions()
        : sourceModifiedAccessConditions;
    final BlobRequestConditions destinationAccessConditions = destAccessConditions == null
        ? new BlobRequestConditions() : destAccessConditions;

    final RequestConditions sourceConditions = new RequestConditions()
        .setIfModifiedSince(sourceModifiedCondition.getIfModifiedSince())
        .setIfUnmodifiedSince(sourceModifiedCondition.getIfUnmodifiedSince())
        .setIfMatch(sourceModifiedCondition.getIfMatch())
        .setIfNoneMatch(sourceModifiedCondition.getIfNoneMatch());

    // Poller wires activation (onStart), polling (onPoll), cancellation (Abort Copy), and no-op fetch.
    return new PollerFlux<>(interval,
        (pollingContext) -> {
            try {
                return onStart(sourceUrl, metadata, tier, priority, sourceConditions,
                    destinationAccessConditions);
            } catch (RuntimeException ex) {
                return monoError(logger, ex);
            }
        },
        (pollingContext) -> {
            try {
                return onPoll(pollingContext.getLatestResponse());
            } catch (RuntimeException ex) {
                return monoError(logger, ex);
            }
        },
        (pollingContext, firstResponse) -> {
            if (firstResponse == null || firstResponse.getValue() == null) {
                return Mono.error(logger.logExceptionAsError(
                    new IllegalArgumentException("Cannot cancel a poll response that never started.")));
            }
            final String copyIdentifier = firstResponse.getValue().getCopyId();
            if (!ImplUtils.isNullOrEmpty(copyIdentifier)) {
                logger.info("Cancelling copy operation for copy id: {}", copyIdentifier);
                return abortCopyFromUrl(copyIdentifier).thenReturn(firstResponse.getValue());
            }
            return Mono.empty();
        },
        (pollingContext) -> Mono.empty());
}

// Activation step: issues Start Copy From URL and maps the headers into the initial BlobCopyInfo.
private Mono<BlobCopyInfo> onStart(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedAccessConditions,
    BlobRequestConditions destinationAccessConditions) {
    URL url;
    try {
        url = new URL(sourceUrl);
    } catch (MalformedURLException ex) {
        throw logger.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url.", ex));
    }

    return withContext(
        context -> azureBlobStorage.blobs().startCopyFromURLWithRestResponseAsync(null, null, url, null, metadata,
            tier, priority, sourceModifiedAccessConditions.getIfModifiedSince(),
            sourceModifiedAccessConditions.getIfUnmodifiedSince(), sourceModifiedAccessConditions.getIfMatch(),
            sourceModifiedAccessConditions.getIfNoneMatch(), destinationAccessConditions.getIfModifiedSince(),
            destinationAccessConditions.getIfUnmodifiedSince(), destinationAccessConditions.getIfMatch(),
            destinationAccessConditions.getIfNoneMatch(), destinationAccessConditions.getLeaseId(), null,
            context))
        .map(response -> {
            final BlobStartCopyFromURLHeaders headers = response.getDeserializedHeaders();
            return new BlobCopyInfo(sourceUrl, headers.getCopyId(), headers.getCopyStatus(),
                headers.getETag(), headers.getLastModified(), headers.getErrorCode());
        });
}

// Poll step: fetches blob properties and maps the service copy status to a poller status.
private Mono<PollResponse<BlobCopyInfo>> onPoll(PollResponse<BlobCopyInfo> pollResponse) {
    if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
        || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) {
        // Terminal states are returned as-is; no further service calls.
        return Mono.just(pollResponse);
    }

    final BlobCopyInfo lastInfo = pollResponse.getValue();
    if (lastInfo == null) {
        logger.warning("BlobCopyInfo does not exist. Activation operation failed.");
        return Mono.just(new PollResponse<>(
            LongRunningOperationStatus.fromString("COPY_START_FAILED", true), null));
    }

    return getProperties().map(response -> {
        final CopyStatusType status = response.getCopyStatus();
        final BlobCopyInfo result = new BlobCopyInfo(response.getCopySource(), response.getCopyId(), status,
            response.getETag(), response.getCopyCompletionTime(), response.getCopyStatusDescription());

        LongRunningOperationStatus operationStatus;
        switch (status) {
            case SUCCESS:
                operationStatus = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case FAILED:
                operationStatus = LongRunningOperationStatus.FAILED;
                break;
            case ABORTED:
                operationStatus = LongRunningOperationStatus.USER_CANCELLED;
                break;
            case PENDING:
                operationStatus = LongRunningOperationStatus.IN_PROGRESS;
                break;
            default:
                throw logger.logExceptionAsError(new IllegalArgumentException(
                    "CopyStatusType is not supported. Status: " + status));
        }

        return new PollResponse<>(operationStatus, result);
    }).onErrorReturn(
        // A failed properties fetch keeps the last known info rather than losing it.
        new PollResponse<>(LongRunningOperationStatus.fromString("POLLING_FAILED", true), lastInfo));
}

/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.abortCopyFromUrl
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @see
 * @see
 * @see
 * @param copyId The id of the copy operation to abort.
 * @return A reactive response signalling completion.
 */
public Mono<Void> abortCopyFromUrl(String copyId) {
    try {
        return abortCopyFromUrlWithResponse(copyId, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.abortCopyFromUrlWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @see
 * @see
 * @see
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @return A reactive response signalling completion.
 */
public Mono<Response<Void>> abortCopyFromUrlWithResponse(String copyId, String leaseId) {
    try {
        return withContext(context -> abortCopyFromUrlWithResponse(copyId, leaseId, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<Void>> abortCopyFromUrlWithResponse(String copyId, String leaseId, Context context) {
    return this.azureBlobStorage.blobs().abortCopyFromURLWithRestResponseAsync(
        null, null, copyId, null, leaseId, null, context)
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.copyFromUrl
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copySource The source URL to copy from.
 * @return A reactive response containing the copy ID for the long running operation.
 */
public Mono<String> copyFromUrl(String copySource) {
    try {
        return copyFromUrlWithResponse(copySource, null, null, null, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.copyFromUrlWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copySource The source URL to copy from.
URLs outside of Azure may only be copied to block blobs. * @param metadata Metadata to associate with the destination blob. * @param tier {@link AccessTier} for the destination blob. * @param sourceModifiedAccessConditions {@link RequestConditions} against the source. Standard HTTP Access * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. The request will fail if the specified * condition is not satisfied. * @param destAccessConditions {@link BlobRequestConditions} against the destination. * @return A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier, RequestConditions sourceModifiedAccessConditions, BlobRequestConditions destAccessConditions) { try { return withContext(context -> copyFromUrlWithResponse(copySource, metadata, tier, sourceModifiedAccessConditions, destAccessConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<String>> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier, RequestConditions sourceModifiedAccessConditions, BlobRequestConditions destAccessConditions, Context context) { sourceModifiedAccessConditions = sourceModifiedAccessConditions == null ? new RequestConditions() : sourceModifiedAccessConditions; destAccessConditions = destAccessConditions == null ? 
new BlobRequestConditions() : destAccessConditions;

URL url;
try {
    url = new URL(copySource);
} catch (MalformedURLException ex) {
    // Surface a clearer, logged argument error instead of the raw checked MalformedURLException.
    throw logger.logExceptionAsError(new IllegalArgumentException("'copySource' is not a valid url."));
}

return this.azureBlobStorage.blobs().copyFromURLWithRestResponseAsync(
    null, null, url, null, metadata, tier, sourceModifiedAccessConditions.getIfModifiedSince(),
    sourceModifiedAccessConditions.getIfUnmodifiedSince(), sourceModifiedAccessConditions.getIfMatch(),
    sourceModifiedAccessConditions.getIfNoneMatch(), destAccessConditions.getIfModifiedSince(),
    destAccessConditions.getIfUnmodifiedSince(), destAccessConditions.getIfMatch(),
    destAccessConditions.getIfNoneMatch(), destAccessConditions.getLeaseId(), null, context)
    .map(rb -> new SimpleResponse<>(rb, rb.getDeserializedHeaders().getCopyId()));
}

/**
 * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient},
 * or {@link AppendBlobClient}.
 *
 * @return A reactive response containing the blob data.
 */
public Flux<ByteBuffer> download() {
    try {
        return downloadWithResponse(null, null, null, false)
            .flatMapMany(BlobDownloadAsyncResponse::getValue);
    } catch (RuntimeException ex) {
        return fluxError(logger, ex);
    }
}

/**
 * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param accessConditions {@link BlobRequestConditions}
 * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned.
 * @return A reactive response containing the blob data.
 */
public Mono<BlobDownloadAsyncResponse> downloadWithResponse(BlobRange range, DownloadRetryOptions options,
    BlobRequestConditions accessConditions, boolean rangeGetContentMD5) {
    try {
        return withContext(context -> downloadWithResponse(range, options, accessConditions, rangeGetContentMD5,
            context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<BlobDownloadAsyncResponse> downloadWithResponse(BlobRange range, DownloadRetryOptions options,
    BlobRequestConditions accessConditions, boolean rangeGetContentMD5, Context context) {
    return downloadHelper(range, options, accessConditions, rangeGetContentMD5, context)
        .map(response -> new BlobDownloadAsyncResponse(response.getRequest(), response.getStatusCode(),
            response.getHeaders(), response.getValue(), response.getDeserializedHeaders()));
}

// Performs the raw ranged download and wraps it in a ReliableDownload that can resume from the last
// received offset (locked to the initial ETag) if the connection drops mid-stream.
private Mono<ReliableDownload> downloadHelper(BlobRange range, DownloadRetryOptions options,
    BlobRequestConditions accessConditions, boolean rangeGetContentMd5, Context context) {
    range = range == null ? new BlobRange(0) : range;
    // The service parameter is tri-state: only send the header when the caller asked for it.
    Boolean getMD5 = rangeGetContentMd5 ? rangeGetContentMd5 : null;
    accessConditions = accessConditions == null ?
new BlobRequestConditions() : accessConditions;
HttpGetterInfo info = new HttpGetterInfo()
    .setOffset(range.getOffset())
    .setCount(range.getCount())
    .setETag(accessConditions.getIfMatch());

return azureBlobStorage.blobs().downloadWithRestResponseAsync(null, null, snapshot, null, range.toHeaderValue(),
    accessConditions.getLeaseId(), getMD5, null, accessConditions.getIfModifiedSince(),
    accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(), accessConditions.getIfNoneMatch(),
    null, customerProvidedKey, context)
    .map(response -> {
        // Lock any resumed download to the ETag returned by this response so a retried range can
        // never silently read a blob that was modified mid-download.
        info.setETag(response.getDeserializedHeaders().getETag());
        return new ReliableDownload(response, options, info, updatedInfo ->
            downloadHelper(new BlobRange(updatedInfo.getOffset(), updatedInfo.getCount()), options,
                new BlobRequestConditions().setIfMatch(info.getETag()), false, context));
    });
}

/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist; if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p>Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or
 * {@link AppendBlobClient}.</p>
 *
 * @param filePath A non-null path to the local file the downloaded data will be written to.
 *                 (NOTE(review): the previous javadoc said "{@link OutputStream} instance", which was wrong —
 *                 the parameter is a {@link String} file path.)
 * @return A reactive response containing the blob properties and metadata.
 */
public Mono<BlobProperties> downloadToFile(String filePath) {
    try {
        return downloadToFileWithResponse(filePath, null, null, null, null, false).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist; if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p>Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or
 * {@link AppendBlobClient}.</p>
 *
 * <p>This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra
 * call, provide the {@link BlobRange} parameter.</p>
 *
 * @param filePath A non-null path to the local file the downloaded data will be written to.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @return A reactive response containing the blob properties and metadata.
 * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Mono<Response<BlobProperties>> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions options,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5) {
    try {
        return withContext(context -> downloadToFileWithResponse(filePath, range, parallelTransferOptions,
            options, requestConditions, rangeGetContentMd5, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<BlobProperties>> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) {
    BlobRange finalRange = range == null ? new BlobRange(0) : range;
    final ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.populateAndApplyDefaults(parallelTransferOptions);
    BlobRequestConditions finalConditions = requestConditions == null ?
new BlobRequestConditions() : requestConditions;

// Open the destination file eagerly; cleanup (close, and delete on failure) happens in doFinally.
AsynchronousFileChannel channel = downloadToFileResourceSupplier(filePath);
return Mono.just(channel)
    .flatMap(c -> this.downloadToFileImpl(c, finalRange, finalParallelTransferOptions,
        downloadRetryOptions, finalConditions, rangeGetContentMd5, context))
    .doFinally(signalType -> this.downloadToFileCleanup(channel, filePath, signalType));
}

// Opens the file with CREATE_NEW, so an already-existing file fails fast; any IOException is
// rethrown unchecked after being logged.
private AsynchronousFileChannel downloadToFileResourceSupplier(String filePath) {
    try {
        return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ,
            StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
    } catch (IOException e) {
        throw logger.logExceptionAsError(new UncheckedIOException(e));
    }
}

// Downloads the blob in fixed-size chunks, writing each chunk to its offset in the file and
// reporting aggregate progress under a shared lock.
private Mono<Response<BlobProperties>> downloadToFileImpl(AsynchronousFileChannel file, BlobRange finalRange,
    ParallelTransferOptions finalParallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) {
    Lock progressLock = new ReentrantLock();
    AtomicLong totalProgress = new AtomicLong(0);

    /*
     * Downloads the first chunk and gets the size of the data and etag if not specified by the user.
     */
    return getSetupMono(finalRange, finalParallelTransferOptions, downloadRetryOptions, requestConditions,
        rangeGetContentMd5, context)
        .flatMap(setupTuple3 -> {
            long newCount = setupTuple3.getT1();
            BlobRequestConditions finalConditions = setupTuple3.getT2();

            int numChunks = calculateNumBlocks(newCount, finalParallelTransferOptions.getBlockSize());
            // An empty blob still produces one (zero-byte) chunk so the file gets created/written.
            numChunks = numChunks == 0 ? 1 : numChunks;

            BlobDownloadAsyncResponse initialResponse = setupTuple3.getT3();
            return Flux.range(0, numChunks)
                .flatMap(chunkNum -> {
                    // Chunk 0 was already fetched by getSetupMono; just write its body.
                    if (chunkNum == 0) {
                        return writeBodyToFile(initialResponse, file, 0, finalParallelTransferOptions,
                            progressLock, totalProgress);
                    }

                    // The final chunk may be shorter than the configured block size.
                    long chunkSizeActual = Math.min(finalParallelTransferOptions.getBlockSize(),
                        newCount - (chunkNum.longValue()
                            * finalParallelTransferOptions.getBlockSize().longValue()));
                    BlobRange chunkRange = new BlobRange(
                        finalRange.getOffset()
                            + (chunkNum.longValue() * finalParallelTransferOptions.getBlockSize().longValue()),
                        chunkSizeActual);

                    return this.downloadWithResponse(chunkRange, downloadRetryOptions, finalConditions,
                        rangeGetContentMd5, null)
                        .subscribeOn(Schedulers.elastic())
                        .flatMap(response -> writeBodyToFile(response, file, chunkNum,
                            finalParallelTransferOptions, progressLock, totalProgress));
                })
                .then(Mono.just(buildBlobPropertiesResponse(initialResponse)));
        });
}

// Number of fixed-size blocks needed to cover dataSize, rounding up.
private int calculateNumBlocks(long dataSize, long blockLength) {
    int numBlocks = toIntExact(dataSize / blockLength);
    if (dataSize % blockLength != 0) {
        numBlocks++;
    }
    return numBlocks;
}

/*
Download the first chunk. Construct a Mono which will emit the total count for calculating the number of chunks,
access conditions containing the etag to lock on, and the response from downloading the first chunk.
*/
private Mono<Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse>> getSetupMono(BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Context context) {
    // The first request is capped at one block (or the caller's count, if smaller).
    long initialChunkSize = range.getCount() != null && range.getCount() < parallelTransferOptions.getBlockSize() ?
range.getCount() : parallelTransferOptions.getBlockSize(); return this.downloadWithResponse(new BlobRange(range.getOffset(), initialChunkSize), downloadRetryOptions, requestConditions, rangeGetContentMd5, context) .subscribeOn(Schedulers.elastic()) .flatMap(response -> { /* Either the etag was set and it matches because the download succeeded, so this is a no-op, or there was no etag, so we set it here. ETag locking is vital to ensure we download one, consistent view of the file. */ BlobRequestConditions newConditions = setEtag(requestConditions, response.getDeserializedHeaders().getETag()); long totalLength = extractTotalBlobLength(response.getDeserializedHeaders().getContentRange()); /* If the user either didn't specify a count or they specified a count greater than the size of the remaining data, take the size of the remaining data. This is to prevent the case where the count is much much larger than the size of the blob and we could try to download at an invalid offset. */ long newCount = range.getCount() == null || range.getCount() > (totalLength - range.getOffset()) ? totalLength - range.getOffset() : range.getCount(); return Mono.zip(Mono.just(newCount), Mono.just(newConditions), Mono.just(response)); }) .onErrorResume(BlobStorageException.class, blobStorageException -> { /* In the case of an empty blob, we still want to report success and give back valid headers. Attempting a range download on an empty blob will return an InvalidRange error code and a Content-Range header of the format "bytes * /0". We need to double check that the total size is zero in the case that the customer has attempted an invalid range on a non-zero length blob. 
*/ if (blobStorageException.getErrorCode() == BlobErrorCode.INVALID_RANGE && extractTotalBlobLength(blobStorageException.getResponse() .getHeaders().getValue("Content-Range")) == 0) { return this.downloadWithResponse(new BlobRange(0, 0L), downloadRetryOptions, requestConditions, rangeGetContentMd5, context) .subscribeOn(Schedulers.elastic()) .flatMap(response -> { /* Ensure the blob is still 0 length by checking our download was the full length. (200 is for full blob; 206 is partial). */ if (response.getStatusCode() != 200) { Mono.error(new IllegalStateException("Blob was modified mid download. It was " + "originally 0 bytes and is now larger.")); } return Mono.zip(Mono.just(0L), Mono.just(requestConditions), Mono.just(response)); }); } return Mono.error(blobStorageException); }); } private static BlobRequestConditions setEtag(BlobRequestConditions accessConditions, String etag) { return new BlobRequestConditions() .setIfModifiedSince( accessConditions.getIfModifiedSince()) .setIfUnmodifiedSince( accessConditions.getIfModifiedSince()) .setIfMatch(etag) .setIfNoneMatch( accessConditions.getIfNoneMatch()) .setLeaseId(accessConditions.getLeaseId()); } private static Mono<Void> writeBodyToFile(BlobDownloadAsyncResponse response, AsynchronousFileChannel file, long chunkNum, ParallelTransferOptions finalParallelTransferOptions, Lock progressLock, AtomicLong totalProgress) { Flux<ByteBuffer> data = response.getValue(); data = ProgressReporter.addParallelProgressReporting(data, finalParallelTransferOptions.getProgressReceiver(), progressLock, totalProgress); return FluxUtil.writeFile(data, file, chunkNum * finalParallelTransferOptions.getBlockSize()); } private static long extractTotalBlobLength(String contentRange) { return Long.parseLong(contentRange.split("/")[1]); } private void downloadToFileCleanup(AsynchronousFileChannel channel, String filePath, SignalType signalType) { try { channel.close(); if (!signalType.equals(SignalType.ON_COMPLETE)) { 
Files.delete(Paths.get(filePath));
            logger.verbose("Downloading to file failed. Cleaning up resources.");
        }
    } catch (IOException e) {
        throw logger.logExceptionAsError(new UncheckedIOException(e));
    }
}

/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * @return A reactive response signalling completion.
 */
public Mono<Void> delete() {
    try {
        return deleteWithResponse(null, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob.
 * {@code Include} will delete the base blob and all snapshots. {@code Only} will delete only the snapshots.
 * If a snapshot is being deleted, you must pass null.
 * @param accessConditions {@link BlobRequestConditions}
 * @return A reactive response signalling completion.
 */
public Mono<Response<Void>> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions accessConditions) {
    try {
        return withContext(context -> deleteWithResponse(deleteBlobSnapshotOptions, accessConditions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<Void>> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions accessConditions, Context context) {
    accessConditions = accessConditions == null
        ? new BlobRequestConditions() : accessConditions;

    return this.azureBlobStorage.blobs().deleteWithRestResponseAsync(null, null, snapshot, null,
        accessConditions.getLeaseId(), deleteBlobSnapshotOptions, accessConditions.getIfModifiedSince(),
        accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
        accessConditions.getIfNoneMatch(), null, context)
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Returns the blob's metadata and properties.
 *
 * @return A reactive response containing the blob properties and metadata.
 */
public Mono<BlobProperties> getProperties() {
    try {
        return getPropertiesWithResponse(null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Returns the blob's metadata and properties.
 *
 * @param accessConditions {@link BlobRequestConditions}
 * @return A reactive response containing the blob properties and metadata.
 */
public Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobRequestConditions accessConditions) {
    try {
        return withContext(context -> getPropertiesWithResponse(accessConditions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobRequestConditions accessConditions,
    Context context) {
    accessConditions = accessConditions == null
        ? new BlobRequestConditions() : accessConditions;

    return this.azureBlobStorage.blobs().getPropertiesWithRestResponseAsync(
        null, null, snapshot, null, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(),
        accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
        accessConditions.getIfNoneMatch(), null, customerProvidedKey, context)
        .map(rb -> {
            BlobGetPropertiesHeaders hd = rb.getDeserializedHeaders();
            // Flatten the raw response headers into the public BlobProperties model; a missing
            // content length is reported as 0.
            BlobProperties properties = new BlobProperties(hd.getCreationTime(), hd.getLastModified(),
                hd.getETag(), hd.getContentLength() == null ? 0 : hd.getContentLength(), hd.getContentType(),
                hd.getContentMD5(), hd.getContentEncoding(), hd.getContentDisposition(),
                hd.getContentLanguage(), hd.getCacheControl(), hd.getBlobSequenceNumber(), hd.getBlobType(),
                hd.getLeaseStatus(), hd.getLeaseState(), hd.getLeaseDuration(), hd.getCopyId(),
                hd.getCopyStatus(), hd.getCopySource(), hd.getCopyProgress(), hd.getCopyCompletionTime(),
                hd.getCopyStatusDescription(), hd.isServerEncrypted(), hd.isIncrementalCopy(),
                hd.getDestinationSnapshot(), AccessTier.fromString(hd.getAccessTier()),
                hd.isAccessTierInferred(), ArchiveStatus.fromString(hd.getArchiveStatus()),
                hd.getEncryptionKeySha256(), hd.getAccessTierChangeTime(), hd.getMetadata(),
                hd.getBlobCommittedBlockCount());
            return new SimpleResponse<>(rb, properties);
        });
}

/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
 * In order to preserve existing values, they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 * @return A reactive response signalling completion.
 */
public Mono<Void> setHttpHeaders(BlobHttpHeaders headers) {
    try {
        return setHttpHeadersWithResponse(headers, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
 * In order to preserve existing values, they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 * @param accessConditions {@link BlobRequestConditions}
 * @return A reactive response signalling completion.
 */
public Mono<Response<Void>> setHttpHeadersWithResponse(BlobHttpHeaders headers,
    BlobRequestConditions accessConditions) {
    try {
        return withContext(context -> setHttpHeadersWithResponse(headers, accessConditions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<Void>> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions accessConditions,
    Context context) {
    accessConditions = accessConditions == null ? new BlobRequestConditions() : accessConditions;

    return this.azureBlobStorage.blobs().setHTTPHeadersWithRestResponseAsync(
        null, null, null, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(),
        accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
        accessConditions.getIfNoneMatch(), null, headers, context)
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old
 * values must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param metadata Metadata to associate with the blob.
 * @return A reactive response signalling completion.
 */
public Mono<Void> setMetadata(Map<String, String> metadata) {
    try {
        return setMetadataWithResponse(metadata, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old
 * values must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param metadata Metadata to associate with the blob.
 * @param accessConditions {@link BlobRequestConditions}
 * @return A reactive response signalling completion.
 */
public Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata,
    BlobRequestConditions accessConditions) {
    try {
        return withContext(context -> setMetadataWithResponse(metadata, accessConditions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata,
    BlobRequestConditions accessConditions, Context context) {
    accessConditions = accessConditions == null
        ? new BlobRequestConditions() : accessConditions;

    return this.azureBlobStorage.blobs().setMetadataWithRestResponseAsync(
        null, null, null, metadata, accessConditions.getLeaseId(), accessConditions.getIfModifiedSince(),
        accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
        accessConditions.getIfNoneMatch(), null, customerProvidedKey, context)
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Creates a read-only snapshot of the blob.
 *
 * @return A response containing a {@link BlobAsyncClientBase} which is used to interact with the created
 * snapshot.
 */
public Mono<BlobAsyncClientBase> createSnapshot() {
    try {
        return createSnapshotWithResponse(null, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Creates a read-only snapshot of the blob.
 *
 * @param metadata Metadata to associate with the blob snapshot.
 * @param accessConditions {@link BlobRequestConditions}
 * @return A response containing a {@link BlobAsyncClientBase} which is used to interact with the created
 * snapshot.
 */
public Mono<Response<BlobAsyncClientBase>> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions accessConditions) {
    try {
        return withContext(context -> createSnapshotWithResponse(metadata, accessConditions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<BlobAsyncClientBase>> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions accessConditions, Context context) {
    accessConditions = accessConditions == null ?
new BlobRequestConditions() : accessConditions;

return this.azureBlobStorage.blobs().createSnapshotWithRestResponseAsync(
    null, null, null, metadata, accessConditions.getIfModifiedSince(),
    accessConditions.getIfUnmodifiedSince(), accessConditions.getIfMatch(),
    accessConditions.getIfNoneMatch(), accessConditions.getLeaseId(), null, customerProvidedKey, context)
    // Wrap the returned snapshot id in a client scoped to that snapshot.
    .map(rb -> new SimpleResponse<>(rb, this.getSnapshotClient(rb.getDeserializedHeaders().getSnapshot())));
}

/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob
 * in a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth
 * of the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param tier The new tier for the blob.
 * @return A reactive response signalling completion.
 * @throws NullPointerException if {@code tier} is null.
 */
public Mono<Void> setAccessTier(AccessTier tier) {
    try {
        return setAccessTierWithResponse(tier, null, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob
 * in a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth
 * of the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @return A reactive response signalling completion.
 * @throws NullPointerException if {@code tier} is null.
 */
public Mono<Response<Void>> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority,
    String leaseId) {
    try {
        return withContext(context -> setTierWithResponse(tier, priority, leaseId, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<Void>> setTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Context context) {
    StorageImplUtils.assertNotNull("tier", tier);

    return this.azureBlobStorage.blobs().setTierWithRestResponseAsync(
        null, null, tier, null, priority, null, leaseId, context)
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted
 * snapshots.
 *
 * @return A reactive response signalling completion.
 */
public Mono<Void> undelete() {
    try {
        return undeleteWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted
 * snapshots.
 *
 * @return A reactive response signalling completion.
 */
public Mono<Response<Void>> undeleteWithResponse() {
    try {
        return withContext(this::undeleteWithResponse);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<Void>> undeleteWithResponse(Context context) {
    return this.azureBlobStorage.blobs().undeleteWithRestResponseAsync(null, null, context).map(response ->
        new SimpleResponse<>(response, null));
}

/**
 * Returns the sku name and account kind for the account.
 *
 * @return a reactor response containing the sku name and account kind.
 */
public Mono<StorageAccountInfo> getAccountInfo() {
    try {
        return getAccountInfoWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Returns the sku name and account kind for the account.
 *
 * @return a reactor response containing the sku name and account kind.
 */
public Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse() {
    try {
        return withContext(this::getAccountInfoWithResponse);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse(Context context) {
    return this.azureBlobStorage.blobs().getAccountInfoWithRestResponseAsync(null, null, context)
        .map(rb -> {
            BlobGetAccountInfoHeaders hd = rb.getDeserializedHeaders();
            return new SimpleResponse<>(rb, new StorageAccountInfo(hd.getSkuName(), hd.getAccountKind()));
        });
}
}
Please change this to use `Optional.orElse(0L)`, which handles the empty-list case without the extra `isEmpty()` guard and the unchecked `Optional.get()`:
```java
long maxGlobalCommittedLsn = (responses != null)
    ? (Long) responses.stream().map(s -> s.globalCommittedLSN).max(ComparatorUtils.NATURAL_COMPARATOR).orElse(0L)
    : 0L;
```
private Mono<Boolean> waitForWriteBarrierAsync(RxDocumentServiceRequest barrierRequest, long selectedGlobalCommittedLsn) { AtomicInteger writeBarrierRetryCount = new AtomicInteger(ConsistencyWriter.MAX_NUMBER_OF_WRITE_BARRIER_READ_RETRIES); AtomicLong maxGlobalCommittedLsnReceived = new AtomicLong(0); return Flux.defer(() -> { if (barrierRequest.requestContext.timeoutHelper.isElapsed()) { return Flux.error(new RequestTimeoutException()); } Mono<List<StoreResult>> storeResultListObs = this.storeReader.readMultipleReplicaAsync( barrierRequest, true /*allowPrimary*/, 1 /*any replica with correct globalCommittedLsn is good enough*/, false /*requiresValidLsn*/, false /*useSessionToken*/, ReadMode.Strong, false /*checkMinLsn*/, false /*forceReadAll*/); return storeResultListObs.flatMap( responses -> { if (responses != null && responses.stream().anyMatch(response -> response.globalCommittedLSN >= selectedGlobalCommittedLsn)) { return Mono.just(Boolean.TRUE); } long maxGlobalCommittedLsn = (responses != null && !responses.isEmpty()) ? (Long) responses.stream().map(s -> s.globalCommittedLSN).max(ComparatorUtils.NATURAL_COMPARATOR).get() : 0L; maxGlobalCommittedLsnReceived.set(maxGlobalCommittedLsnReceived.get() > maxGlobalCommittedLsn ? maxGlobalCommittedLsnReceived.get() : maxGlobalCommittedLsn); barrierRequest.requestContext.forceRefreshAddressCache = false; if (writeBarrierRetryCount.getAndDecrement() == 0) { logger.debug("ConsistencyWriter: WaitForWriteBarrierAsync - Last barrier multi-region strong. 
Responses: {}", responses.stream().map(StoreResult::toString).collect(Collectors.joining("; "))); logger.debug("ConsistencyWriter: Highest global committed lsn received for write barrier call is {}", maxGlobalCommittedLsnReceived); return Mono.just(Boolean.FALSE); } return Mono.empty(); }).flux(); }).repeatWhen(s -> s.flatMap(x -> { if ((ConsistencyWriter.MAX_NUMBER_OF_WRITE_BARRIER_READ_RETRIES - writeBarrierRetryCount.get()) > ConsistencyWriter.MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION) { return Mono.delay(Duration.ofMillis(ConsistencyWriter.DELAY_BETWEEN_WRITE_BARRIER_CALLS_IN_MS)).flux(); } else { return Mono.delay(Duration.ofMillis(ConsistencyWriter.SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION)).flux(); } }) ).take(1).single(); }
long maxGlobalCommittedLsn = (responses != null && !responses.isEmpty()) ?
private Mono<Boolean> waitForWriteBarrierAsync(RxDocumentServiceRequest barrierRequest, long selectedGlobalCommittedLsn) { AtomicInteger writeBarrierRetryCount = new AtomicInteger(ConsistencyWriter.MAX_NUMBER_OF_WRITE_BARRIER_READ_RETRIES); AtomicLong maxGlobalCommittedLsnReceived = new AtomicLong(0); return Flux.defer(() -> { if (barrierRequest.requestContext.timeoutHelper.isElapsed()) { return Flux.error(new RequestTimeoutException()); } Mono<List<StoreResult>> storeResultListObs = this.storeReader.readMultipleReplicaAsync( barrierRequest, true /*allowPrimary*/, 1 /*any replica with correct globalCommittedLsn is good enough*/, false /*requiresValidLsn*/, false /*useSessionToken*/, ReadMode.Strong, false /*checkMinLsn*/, false /*forceReadAll*/); return storeResultListObs.flatMap( responses -> { if (responses != null && responses.stream().anyMatch(response -> response.globalCommittedLSN >= selectedGlobalCommittedLsn)) { return Mono.just(Boolean.TRUE); } long maxGlobalCommittedLsn = (responses != null) ? (Long) responses.stream().map(s -> s.globalCommittedLSN).max(ComparatorUtils.NATURAL_COMPARATOR).orElse(0L) : 0L; maxGlobalCommittedLsnReceived.set(maxGlobalCommittedLsnReceived.get() > maxGlobalCommittedLsn ? maxGlobalCommittedLsnReceived.get() : maxGlobalCommittedLsn); barrierRequest.requestContext.forceRefreshAddressCache = false; if (writeBarrierRetryCount.getAndDecrement() == 0) { logger.debug("ConsistencyWriter: WaitForWriteBarrierAsync - Last barrier multi-region strong. 
Responses: {}", responses.stream().map(StoreResult::toString).collect(Collectors.joining("; "))); logger.debug("ConsistencyWriter: Highest global committed lsn received for write barrier call is {}", maxGlobalCommittedLsnReceived); return Mono.just(Boolean.FALSE); } return Mono.empty(); }).flux(); }).repeatWhen(s -> s.flatMap(x -> { if ((ConsistencyWriter.MAX_NUMBER_OF_WRITE_BARRIER_READ_RETRIES - writeBarrierRetryCount.get()) > ConsistencyWriter.MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION) { return Mono.delay(Duration.ofMillis(ConsistencyWriter.DELAY_BETWEEN_WRITE_BARRIER_CALLS_IN_MS)).flux(); } else { return Mono.delay(Duration.ofMillis(ConsistencyWriter.SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION)).flux(); } }) ).take(1).single(); }
class ConsistencyWriter { private final static int MAX_NUMBER_OF_WRITE_BARRIER_READ_RETRIES = 30; private final static int DELAY_BETWEEN_WRITE_BARRIER_CALLS_IN_MS = 30; private final static int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4; private final static int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10; private final Logger logger = LoggerFactory.getLogger(ConsistencyWriter.class); private final TransportClient transportClient; private final AddressSelector addressSelector; private final ISessionContainer sessionContainer; private final IAuthorizationTokenProvider authorizationTokenProvider; private final boolean useMultipleWriteLocations; private final GatewayServiceConfigurationReader serviceConfigReader; private final StoreReader storeReader; public ConsistencyWriter( AddressSelector addressSelector, ISessionContainer sessionContainer, TransportClient transportClient, IAuthorizationTokenProvider authorizationTokenProvider, GatewayServiceConfigurationReader serviceConfigReader, boolean useMultipleWriteLocations) { this.transportClient = transportClient; this.addressSelector = addressSelector; this.sessionContainer = sessionContainer; this.authorizationTokenProvider = authorizationTokenProvider; this.useMultipleWriteLocations = useMultipleWriteLocations; this.serviceConfigReader = serviceConfigReader; this.storeReader = new StoreReader(transportClient, addressSelector, null /*we need store reader only for global strong, no session is needed*/); } public Mono<StoreResponse> writeAsync( RxDocumentServiceRequest entity, TimeoutHelper timeout, boolean forceRefresh) { if (timeout.isElapsed()) { return Mono.error(new RequestTimeoutException()); } String sessionToken = entity.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); return this.writePrivateAsync(entity, timeout, forceRefresh).doOnEach( arg -> { try { SessionTokenHelper.setOriginalSessionToken(entity, sessionToken); } catch (Throwable throwable) { logger.error("Unexpected failure in 
handling orig [{}]: new [{}]", arg, throwable.getMessage(), throwable); } } ); } Mono<StoreResponse> writePrivateAsync( RxDocumentServiceRequest request, TimeoutHelper timeout, boolean forceRefresh) { if (timeout.isElapsed()) { return Mono.error(new RequestTimeoutException()); } request.requestContext.timeoutHelper = timeout; if (request.requestContext.requestChargeTracker == null) { request.requestContext.requestChargeTracker = new RequestChargeTracker(); } if (request.requestContext.cosmosResponseDiagnostics == null) { request.requestContext.cosmosResponseDiagnostics = BridgeInternal.createCosmosResponseDiagnostics(); } request.requestContext.forceRefreshAddressCache = forceRefresh; if (request.requestContext.globalStrongWriteResponse == null) { Mono<List<AddressInformation>> replicaAddressesObs = this.addressSelector.resolveAddressesAsync(request, forceRefresh); AtomicReference<URI> primaryURI = new AtomicReference<>(); return replicaAddressesObs.flatMap(replicaAddresses -> { try { List<URI> contactedReplicas = new ArrayList<>(); replicaAddresses.forEach(replicaAddress -> contactedReplicas.add(HttpUtils.toURI(replicaAddress.getPhysicalUri()))); BridgeInternal.setContactedReplicas(request.requestContext.cosmosResponseDiagnostics, contactedReplicas); return Mono.just(AddressSelector.getPrimaryUri(request, replicaAddresses)); } catch (GoneException e) { return Mono.error(e); } }).flatMap(primaryUri -> { try { primaryURI.set(primaryUri); if (this.useMultipleWriteLocations && RequestHelper.GetConsistencyLevelToUse(this.serviceConfigReader, request) == ConsistencyLevel.SESSION) { SessionTokenHelper.setPartitionLocalSessionToken(request, this.sessionContainer); } else { SessionTokenHelper.validateAndRemoveSessionToken(request); } } catch (Exception e) { return Mono.error(e); } return this.transportClient.invokeResourceOperationAsync(primaryUri, request) .doOnError( t -> { try { Throwable unwrappedException = Exceptions.unwrap(t); CosmosClientException ex = 
Utils.as(unwrappedException, CosmosClientException.class); try { BridgeInternal.recordResponse(request.requestContext.cosmosResponseDiagnostics, request, storeReader.createStoreResult(null, ex, false, false, primaryUri)); } catch (Exception e) { logger.error("Error occurred while recording response", e); } String value = ex.responseHeaders().get(HttpConstants.HttpHeaders.WRITE_REQUEST_TRIGGER_ADDRESS_REFRESH); if (!Strings.isNullOrWhiteSpace(value)) { Integer result = Integers.tryParse(value); if (result != null && result == 1) { startBackgroundAddressRefresh(request); } } } catch (Throwable throwable) { logger.error("Unexpected failure in handling orig [{}]", t.getMessage(), t); logger.error("Unexpected failure in handling orig [{}] : new [{}]", t.getMessage(), throwable.getMessage(), throwable); } } ); }).flatMap(response -> { try { BridgeInternal.recordResponse(request.requestContext.cosmosResponseDiagnostics, request, storeReader.createStoreResult(response, null, false, false, primaryURI.get())); } catch (Exception e) { logger.error("Error occurred while recording response", e); } return barrierForGlobalStrong(request, response); }); } else { Mono<RxDocumentServiceRequest> barrierRequestObs = BarrierRequestHelper.createAsync(request, this.authorizationTokenProvider, null, request.requestContext.globalCommittedSelectedLSN); return barrierRequestObs.flatMap(barrierRequest -> waitForWriteBarrierAsync(barrierRequest, request.requestContext.globalCommittedSelectedLSN) .flatMap(v -> { if (!v) { logger.warn("ConsistencyWriter: Write barrier has not been met for global strong request. 
SelectedGlobalCommittedLsn: {}", request.requestContext.globalCommittedSelectedLSN); return Mono.error(new GoneException(RMResources.GlobalStrongWriteBarrierNotMet)); } return Mono.just(request); })).map(req -> req.requestContext.globalStrongWriteResponse); } } boolean isGlobalStrongRequest(RxDocumentServiceRequest request, StoreResponse response) { if (this.serviceConfigReader.getDefaultConsistencyLevel() == ConsistencyLevel.STRONG) { int numberOfReadRegions = -1; String headerValue = null; if ((headerValue = response.getHeaderValue(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS)) != null) { numberOfReadRegions = Integer.parseInt(headerValue); } if (numberOfReadRegions > 0 && this.serviceConfigReader.getDefaultConsistencyLevel() == ConsistencyLevel.STRONG) { return true; } } return false; } Mono<StoreResponse> barrierForGlobalStrong(RxDocumentServiceRequest request, StoreResponse response) { try { if (ReplicatedResourceClient.isGlobalStrongEnabled() && this.isGlobalStrongRequest(request, response)) { Utils.ValueHolder<Long> lsn = Utils.ValueHolder.initialize(-1l); Utils.ValueHolder<Long> globalCommittedLsn = Utils.ValueHolder.initialize(-1l); getLsnAndGlobalCommittedLsn(response, lsn, globalCommittedLsn); if (lsn.v == -1 || globalCommittedLsn.v == -1) { logger.error("ConsistencyWriter: lsn {} or GlobalCommittedLsn {} is not set for global strong request", lsn, globalCommittedLsn); throw new GoneException(RMResources.Gone); } request.requestContext.globalStrongWriteResponse = response; request.requestContext.globalCommittedSelectedLSN = lsn.v; request.requestContext.forceRefreshAddressCache = false; logger.debug("ConsistencyWriter: globalCommittedLsn {}, lsn {}", globalCommittedLsn, lsn); if (globalCommittedLsn.v < lsn.v) { Mono<RxDocumentServiceRequest> barrierRequestObs = BarrierRequestHelper.createAsync(request, this.authorizationTokenProvider, null, request.requestContext.globalCommittedSelectedLSN); return barrierRequestObs.flatMap(barrierRequest -> { 
Mono<Boolean> barrierWait = this.waitForWriteBarrierAsync(barrierRequest, request.requestContext.globalCommittedSelectedLSN); return barrierWait.flatMap(res -> { if (!res) { logger.error("ConsistencyWriter: Write barrier has not been met for global strong request. SelectedGlobalCommittedLsn: {}", request.requestContext.globalCommittedSelectedLSN); return Mono.error(new GoneException(RMResources.GlobalStrongWriteBarrierNotMet)); } return Mono.just(request.requestContext.globalStrongWriteResponse); }); }); } else { return Mono.just(request.requestContext.globalStrongWriteResponse); } } else { return Mono.just(response); } } catch (CosmosClientException e) { return Mono.error(e); } } static void getLsnAndGlobalCommittedLsn(StoreResponse response, Utils.ValueHolder<Long> lsn, Utils.ValueHolder<Long> globalCommittedLsn) { lsn.v = -1L; globalCommittedLsn.v = -1L; String headerValue; if ((headerValue = response.getHeaderValue(WFConstants.BackendHeaders.LSN)) != null) { lsn.v = Long.parseLong(headerValue); } if ((headerValue = response.getHeaderValue(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN)) != null) { globalCommittedLsn.v = Long.parseLong(headerValue); } } void startBackgroundAddressRefresh(RxDocumentServiceRequest request) { this.addressSelector.resolvePrimaryUriAsync(request, true) .publishOn(Schedulers.elastic()) .subscribe( r -> { }, e -> logger.warn( "Background refresh of the primary address failed with {}", e.getMessage(), e) ); } }
class ConsistencyWriter { private final static int MAX_NUMBER_OF_WRITE_BARRIER_READ_RETRIES = 30; private final static int DELAY_BETWEEN_WRITE_BARRIER_CALLS_IN_MS = 30; private final static int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4; private final static int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10; private final Logger logger = LoggerFactory.getLogger(ConsistencyWriter.class); private final TransportClient transportClient; private final AddressSelector addressSelector; private final ISessionContainer sessionContainer; private final IAuthorizationTokenProvider authorizationTokenProvider; private final boolean useMultipleWriteLocations; private final GatewayServiceConfigurationReader serviceConfigReader; private final StoreReader storeReader; public ConsistencyWriter( AddressSelector addressSelector, ISessionContainer sessionContainer, TransportClient transportClient, IAuthorizationTokenProvider authorizationTokenProvider, GatewayServiceConfigurationReader serviceConfigReader, boolean useMultipleWriteLocations) { this.transportClient = transportClient; this.addressSelector = addressSelector; this.sessionContainer = sessionContainer; this.authorizationTokenProvider = authorizationTokenProvider; this.useMultipleWriteLocations = useMultipleWriteLocations; this.serviceConfigReader = serviceConfigReader; this.storeReader = new StoreReader(transportClient, addressSelector, null /*we need store reader only for global strong, no session is needed*/); } public Mono<StoreResponse> writeAsync( RxDocumentServiceRequest entity, TimeoutHelper timeout, boolean forceRefresh) { if (timeout.isElapsed()) { return Mono.error(new RequestTimeoutException()); } String sessionToken = entity.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); return this.writePrivateAsync(entity, timeout, forceRefresh).doOnEach( arg -> { try { SessionTokenHelper.setOriginalSessionToken(entity, sessionToken); } catch (Throwable throwable) { logger.error("Unexpected failure in 
handling orig [{}]: new [{}]", arg, throwable.getMessage(), throwable); } } ); } Mono<StoreResponse> writePrivateAsync( RxDocumentServiceRequest request, TimeoutHelper timeout, boolean forceRefresh) { if (timeout.isElapsed()) { return Mono.error(new RequestTimeoutException()); } request.requestContext.timeoutHelper = timeout; if (request.requestContext.requestChargeTracker == null) { request.requestContext.requestChargeTracker = new RequestChargeTracker(); } if (request.requestContext.cosmosResponseDiagnostics == null) { request.requestContext.cosmosResponseDiagnostics = BridgeInternal.createCosmosResponseDiagnostics(); } request.requestContext.forceRefreshAddressCache = forceRefresh; if (request.requestContext.globalStrongWriteResponse == null) { Mono<List<AddressInformation>> replicaAddressesObs = this.addressSelector.resolveAddressesAsync(request, forceRefresh); AtomicReference<URI> primaryURI = new AtomicReference<>(); return replicaAddressesObs.flatMap(replicaAddresses -> { try { List<URI> contactedReplicas = new ArrayList<>(); replicaAddresses.forEach(replicaAddress -> contactedReplicas.add(HttpUtils.toURI(replicaAddress.getPhysicalUri()))); BridgeInternal.setContactedReplicas(request.requestContext.cosmosResponseDiagnostics, contactedReplicas); return Mono.just(AddressSelector.getPrimaryUri(request, replicaAddresses)); } catch (GoneException e) { return Mono.error(e); } }).flatMap(primaryUri -> { try { primaryURI.set(primaryUri); if (this.useMultipleWriteLocations && RequestHelper.GetConsistencyLevelToUse(this.serviceConfigReader, request) == ConsistencyLevel.SESSION) { SessionTokenHelper.setPartitionLocalSessionToken(request, this.sessionContainer); } else { SessionTokenHelper.validateAndRemoveSessionToken(request); } } catch (Exception e) { return Mono.error(e); } return this.transportClient.invokeResourceOperationAsync(primaryUri, request) .doOnError( t -> { try { Throwable unwrappedException = Exceptions.unwrap(t); CosmosClientException ex = 
Utils.as(unwrappedException, CosmosClientException.class); try { BridgeInternal.recordResponse(request.requestContext.cosmosResponseDiagnostics, request, storeReader.createStoreResult(null, ex, false, false, primaryUri)); } catch (Exception e) { logger.error("Error occurred while recording response", e); } String value = ex.responseHeaders().get(HttpConstants.HttpHeaders.WRITE_REQUEST_TRIGGER_ADDRESS_REFRESH); if (!Strings.isNullOrWhiteSpace(value)) { Integer result = Integers.tryParse(value); if (result != null && result == 1) { startBackgroundAddressRefresh(request); } } } catch (Throwable throwable) { logger.error("Unexpected failure in handling orig [{}]", t.getMessage(), t); logger.error("Unexpected failure in handling orig [{}] : new [{}]", t.getMessage(), throwable.getMessage(), throwable); } } ); }).flatMap(response -> { try { BridgeInternal.recordResponse(request.requestContext.cosmosResponseDiagnostics, request, storeReader.createStoreResult(response, null, false, false, primaryURI.get())); } catch (Exception e) { logger.error("Error occurred while recording response", e); } return barrierForGlobalStrong(request, response); }); } else { Mono<RxDocumentServiceRequest> barrierRequestObs = BarrierRequestHelper.createAsync(request, this.authorizationTokenProvider, null, request.requestContext.globalCommittedSelectedLSN); return barrierRequestObs.flatMap(barrierRequest -> waitForWriteBarrierAsync(barrierRequest, request.requestContext.globalCommittedSelectedLSN) .flatMap(v -> { if (!v) { logger.warn("ConsistencyWriter: Write barrier has not been met for global strong request. 
SelectedGlobalCommittedLsn: {}", request.requestContext.globalCommittedSelectedLSN); return Mono.error(new GoneException(RMResources.GlobalStrongWriteBarrierNotMet)); } return Mono.just(request); })).map(req -> req.requestContext.globalStrongWriteResponse); } } boolean isGlobalStrongRequest(RxDocumentServiceRequest request, StoreResponse response) { if (this.serviceConfigReader.getDefaultConsistencyLevel() == ConsistencyLevel.STRONG) { int numberOfReadRegions = -1; String headerValue = null; if ((headerValue = response.getHeaderValue(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS)) != null) { numberOfReadRegions = Integer.parseInt(headerValue); } if (numberOfReadRegions > 0 && this.serviceConfigReader.getDefaultConsistencyLevel() == ConsistencyLevel.STRONG) { return true; } } return false; } Mono<StoreResponse> barrierForGlobalStrong(RxDocumentServiceRequest request, StoreResponse response) { try { if (ReplicatedResourceClient.isGlobalStrongEnabled() && this.isGlobalStrongRequest(request, response)) { Utils.ValueHolder<Long> lsn = Utils.ValueHolder.initialize(-1l); Utils.ValueHolder<Long> globalCommittedLsn = Utils.ValueHolder.initialize(-1l); getLsnAndGlobalCommittedLsn(response, lsn, globalCommittedLsn); if (lsn.v == -1 || globalCommittedLsn.v == -1) { logger.error("ConsistencyWriter: lsn {} or GlobalCommittedLsn {} is not set for global strong request", lsn, globalCommittedLsn); throw new GoneException(RMResources.Gone); } request.requestContext.globalStrongWriteResponse = response; request.requestContext.globalCommittedSelectedLSN = lsn.v; request.requestContext.forceRefreshAddressCache = false; logger.debug("ConsistencyWriter: globalCommittedLsn {}, lsn {}", globalCommittedLsn, lsn); if (globalCommittedLsn.v < lsn.v) { Mono<RxDocumentServiceRequest> barrierRequestObs = BarrierRequestHelper.createAsync(request, this.authorizationTokenProvider, null, request.requestContext.globalCommittedSelectedLSN); return barrierRequestObs.flatMap(barrierRequest -> { 
Mono<Boolean> barrierWait = this.waitForWriteBarrierAsync(barrierRequest, request.requestContext.globalCommittedSelectedLSN); return barrierWait.flatMap(res -> { if (!res) { logger.error("ConsistencyWriter: Write barrier has not been met for global strong request. SelectedGlobalCommittedLsn: {}", request.requestContext.globalCommittedSelectedLSN); return Mono.error(new GoneException(RMResources.GlobalStrongWriteBarrierNotMet)); } return Mono.just(request.requestContext.globalStrongWriteResponse); }); }); } else { return Mono.just(request.requestContext.globalStrongWriteResponse); } } else { return Mono.just(response); } } catch (CosmosClientException e) { return Mono.error(e); } } static void getLsnAndGlobalCommittedLsn(StoreResponse response, Utils.ValueHolder<Long> lsn, Utils.ValueHolder<Long> globalCommittedLsn) { lsn.v = -1L; globalCommittedLsn.v = -1L; String headerValue; if ((headerValue = response.getHeaderValue(WFConstants.BackendHeaders.LSN)) != null) { lsn.v = Long.parseLong(headerValue); } if ((headerValue = response.getHeaderValue(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN)) != null) { globalCommittedLsn.v = Long.parseLong(headerValue); } } void startBackgroundAddressRefresh(RxDocumentServiceRequest request) { this.addressSelector.resolvePrimaryUriAsync(request, true) .publishOn(Schedulers.elastic()) .subscribe( r -> { }, e -> logger.warn( "Background refresh of the primary address failed with {}", e.getMessage(), e) ); } }
Fixed
private Mono<Boolean> waitForWriteBarrierAsync(RxDocumentServiceRequest barrierRequest, long selectedGlobalCommittedLsn) { AtomicInteger writeBarrierRetryCount = new AtomicInteger(ConsistencyWriter.MAX_NUMBER_OF_WRITE_BARRIER_READ_RETRIES); AtomicLong maxGlobalCommittedLsnReceived = new AtomicLong(0); return Flux.defer(() -> { if (barrierRequest.requestContext.timeoutHelper.isElapsed()) { return Flux.error(new RequestTimeoutException()); } Mono<List<StoreResult>> storeResultListObs = this.storeReader.readMultipleReplicaAsync( barrierRequest, true /*allowPrimary*/, 1 /*any replica with correct globalCommittedLsn is good enough*/, false /*requiresValidLsn*/, false /*useSessionToken*/, ReadMode.Strong, false /*checkMinLsn*/, false /*forceReadAll*/); return storeResultListObs.flatMap( responses -> { if (responses != null && responses.stream().anyMatch(response -> response.globalCommittedLSN >= selectedGlobalCommittedLsn)) { return Mono.just(Boolean.TRUE); } long maxGlobalCommittedLsn = (responses != null && !responses.isEmpty()) ? (Long) responses.stream().map(s -> s.globalCommittedLSN).max(ComparatorUtils.NATURAL_COMPARATOR).get() : 0L; maxGlobalCommittedLsnReceived.set(maxGlobalCommittedLsnReceived.get() > maxGlobalCommittedLsn ? maxGlobalCommittedLsnReceived.get() : maxGlobalCommittedLsn); barrierRequest.requestContext.forceRefreshAddressCache = false; if (writeBarrierRetryCount.getAndDecrement() == 0) { logger.debug("ConsistencyWriter: WaitForWriteBarrierAsync - Last barrier multi-region strong. 
Responses: {}", responses.stream().map(StoreResult::toString).collect(Collectors.joining("; "))); logger.debug("ConsistencyWriter: Highest global committed lsn received for write barrier call is {}", maxGlobalCommittedLsnReceived); return Mono.just(Boolean.FALSE); } return Mono.empty(); }).flux(); }).repeatWhen(s -> s.flatMap(x -> { if ((ConsistencyWriter.MAX_NUMBER_OF_WRITE_BARRIER_READ_RETRIES - writeBarrierRetryCount.get()) > ConsistencyWriter.MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION) { return Mono.delay(Duration.ofMillis(ConsistencyWriter.DELAY_BETWEEN_WRITE_BARRIER_CALLS_IN_MS)).flux(); } else { return Mono.delay(Duration.ofMillis(ConsistencyWriter.SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION)).flux(); } }) ).take(1).single(); }
long maxGlobalCommittedLsn = (responses != null && !responses.isEmpty()) ?
private Mono<Boolean> waitForWriteBarrierAsync(RxDocumentServiceRequest barrierRequest, long selectedGlobalCommittedLsn) { AtomicInteger writeBarrierRetryCount = new AtomicInteger(ConsistencyWriter.MAX_NUMBER_OF_WRITE_BARRIER_READ_RETRIES); AtomicLong maxGlobalCommittedLsnReceived = new AtomicLong(0); return Flux.defer(() -> { if (barrierRequest.requestContext.timeoutHelper.isElapsed()) { return Flux.error(new RequestTimeoutException()); } Mono<List<StoreResult>> storeResultListObs = this.storeReader.readMultipleReplicaAsync( barrierRequest, true /*allowPrimary*/, 1 /*any replica with correct globalCommittedLsn is good enough*/, false /*requiresValidLsn*/, false /*useSessionToken*/, ReadMode.Strong, false /*checkMinLsn*/, false /*forceReadAll*/); return storeResultListObs.flatMap( responses -> { if (responses != null && responses.stream().anyMatch(response -> response.globalCommittedLSN >= selectedGlobalCommittedLsn)) { return Mono.just(Boolean.TRUE); } long maxGlobalCommittedLsn = (responses != null) ? (Long) responses.stream().map(s -> s.globalCommittedLSN).max(ComparatorUtils.NATURAL_COMPARATOR).orElse(0L) : 0L; maxGlobalCommittedLsnReceived.set(maxGlobalCommittedLsnReceived.get() > maxGlobalCommittedLsn ? maxGlobalCommittedLsnReceived.get() : maxGlobalCommittedLsn); barrierRequest.requestContext.forceRefreshAddressCache = false; if (writeBarrierRetryCount.getAndDecrement() == 0) { logger.debug("ConsistencyWriter: WaitForWriteBarrierAsync - Last barrier multi-region strong. 
Responses: {}", responses.stream().map(StoreResult::toString).collect(Collectors.joining("; "))); logger.debug("ConsistencyWriter: Highest global committed lsn received for write barrier call is {}", maxGlobalCommittedLsnReceived); return Mono.just(Boolean.FALSE); } return Mono.empty(); }).flux(); }).repeatWhen(s -> s.flatMap(x -> { if ((ConsistencyWriter.MAX_NUMBER_OF_WRITE_BARRIER_READ_RETRIES - writeBarrierRetryCount.get()) > ConsistencyWriter.MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION) { return Mono.delay(Duration.ofMillis(ConsistencyWriter.DELAY_BETWEEN_WRITE_BARRIER_CALLS_IN_MS)).flux(); } else { return Mono.delay(Duration.ofMillis(ConsistencyWriter.SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION)).flux(); } }) ).take(1).single(); }
class ConsistencyWriter { private final static int MAX_NUMBER_OF_WRITE_BARRIER_READ_RETRIES = 30; private final static int DELAY_BETWEEN_WRITE_BARRIER_CALLS_IN_MS = 30; private final static int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4; private final static int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10; private final Logger logger = LoggerFactory.getLogger(ConsistencyWriter.class); private final TransportClient transportClient; private final AddressSelector addressSelector; private final ISessionContainer sessionContainer; private final IAuthorizationTokenProvider authorizationTokenProvider; private final boolean useMultipleWriteLocations; private final GatewayServiceConfigurationReader serviceConfigReader; private final StoreReader storeReader; public ConsistencyWriter( AddressSelector addressSelector, ISessionContainer sessionContainer, TransportClient transportClient, IAuthorizationTokenProvider authorizationTokenProvider, GatewayServiceConfigurationReader serviceConfigReader, boolean useMultipleWriteLocations) { this.transportClient = transportClient; this.addressSelector = addressSelector; this.sessionContainer = sessionContainer; this.authorizationTokenProvider = authorizationTokenProvider; this.useMultipleWriteLocations = useMultipleWriteLocations; this.serviceConfigReader = serviceConfigReader; this.storeReader = new StoreReader(transportClient, addressSelector, null /*we need store reader only for global strong, no session is needed*/); } public Mono<StoreResponse> writeAsync( RxDocumentServiceRequest entity, TimeoutHelper timeout, boolean forceRefresh) { if (timeout.isElapsed()) { return Mono.error(new RequestTimeoutException()); } String sessionToken = entity.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); return this.writePrivateAsync(entity, timeout, forceRefresh).doOnEach( arg -> { try { SessionTokenHelper.setOriginalSessionToken(entity, sessionToken); } catch (Throwable throwable) { logger.error("Unexpected failure in 
handling orig [{}]: new [{}]", arg, throwable.getMessage(), throwable); } } ); } Mono<StoreResponse> writePrivateAsync( RxDocumentServiceRequest request, TimeoutHelper timeout, boolean forceRefresh) { if (timeout.isElapsed()) { return Mono.error(new RequestTimeoutException()); } request.requestContext.timeoutHelper = timeout; if (request.requestContext.requestChargeTracker == null) { request.requestContext.requestChargeTracker = new RequestChargeTracker(); } if (request.requestContext.cosmosResponseDiagnostics == null) { request.requestContext.cosmosResponseDiagnostics = BridgeInternal.createCosmosResponseDiagnostics(); } request.requestContext.forceRefreshAddressCache = forceRefresh; if (request.requestContext.globalStrongWriteResponse == null) { Mono<List<AddressInformation>> replicaAddressesObs = this.addressSelector.resolveAddressesAsync(request, forceRefresh); AtomicReference<URI> primaryURI = new AtomicReference<>(); return replicaAddressesObs.flatMap(replicaAddresses -> { try { List<URI> contactedReplicas = new ArrayList<>(); replicaAddresses.forEach(replicaAddress -> contactedReplicas.add(HttpUtils.toURI(replicaAddress.getPhysicalUri()))); BridgeInternal.setContactedReplicas(request.requestContext.cosmosResponseDiagnostics, contactedReplicas); return Mono.just(AddressSelector.getPrimaryUri(request, replicaAddresses)); } catch (GoneException e) { return Mono.error(e); } }).flatMap(primaryUri -> { try { primaryURI.set(primaryUri); if (this.useMultipleWriteLocations && RequestHelper.GetConsistencyLevelToUse(this.serviceConfigReader, request) == ConsistencyLevel.SESSION) { SessionTokenHelper.setPartitionLocalSessionToken(request, this.sessionContainer); } else { SessionTokenHelper.validateAndRemoveSessionToken(request); } } catch (Exception e) { return Mono.error(e); } return this.transportClient.invokeResourceOperationAsync(primaryUri, request) .doOnError( t -> { try { Throwable unwrappedException = Exceptions.unwrap(t); CosmosClientException ex = 
Utils.as(unwrappedException, CosmosClientException.class); try { BridgeInternal.recordResponse(request.requestContext.cosmosResponseDiagnostics, request, storeReader.createStoreResult(null, ex, false, false, primaryUri)); } catch (Exception e) { logger.error("Error occurred while recording response", e); } String value = ex.responseHeaders().get(HttpConstants.HttpHeaders.WRITE_REQUEST_TRIGGER_ADDRESS_REFRESH); if (!Strings.isNullOrWhiteSpace(value)) { Integer result = Integers.tryParse(value); if (result != null && result == 1) { startBackgroundAddressRefresh(request); } } } catch (Throwable throwable) { logger.error("Unexpected failure in handling orig [{}]", t.getMessage(), t); logger.error("Unexpected failure in handling orig [{}] : new [{}]", t.getMessage(), throwable.getMessage(), throwable); } } ); }).flatMap(response -> { try { BridgeInternal.recordResponse(request.requestContext.cosmosResponseDiagnostics, request, storeReader.createStoreResult(response, null, false, false, primaryURI.get())); } catch (Exception e) { logger.error("Error occurred while recording response", e); } return barrierForGlobalStrong(request, response); }); } else { Mono<RxDocumentServiceRequest> barrierRequestObs = BarrierRequestHelper.createAsync(request, this.authorizationTokenProvider, null, request.requestContext.globalCommittedSelectedLSN); return barrierRequestObs.flatMap(barrierRequest -> waitForWriteBarrierAsync(barrierRequest, request.requestContext.globalCommittedSelectedLSN) .flatMap(v -> { if (!v) { logger.warn("ConsistencyWriter: Write barrier has not been met for global strong request. 
SelectedGlobalCommittedLsn: {}", request.requestContext.globalCommittedSelectedLSN); return Mono.error(new GoneException(RMResources.GlobalStrongWriteBarrierNotMet)); } return Mono.just(request); })).map(req -> req.requestContext.globalStrongWriteResponse); } } boolean isGlobalStrongRequest(RxDocumentServiceRequest request, StoreResponse response) { if (this.serviceConfigReader.getDefaultConsistencyLevel() == ConsistencyLevel.STRONG) { int numberOfReadRegions = -1; String headerValue = null; if ((headerValue = response.getHeaderValue(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS)) != null) { numberOfReadRegions = Integer.parseInt(headerValue); } if (numberOfReadRegions > 0 && this.serviceConfigReader.getDefaultConsistencyLevel() == ConsistencyLevel.STRONG) { return true; } } return false; } Mono<StoreResponse> barrierForGlobalStrong(RxDocumentServiceRequest request, StoreResponse response) { try { if (ReplicatedResourceClient.isGlobalStrongEnabled() && this.isGlobalStrongRequest(request, response)) { Utils.ValueHolder<Long> lsn = Utils.ValueHolder.initialize(-1l); Utils.ValueHolder<Long> globalCommittedLsn = Utils.ValueHolder.initialize(-1l); getLsnAndGlobalCommittedLsn(response, lsn, globalCommittedLsn); if (lsn.v == -1 || globalCommittedLsn.v == -1) { logger.error("ConsistencyWriter: lsn {} or GlobalCommittedLsn {} is not set for global strong request", lsn, globalCommittedLsn); throw new GoneException(RMResources.Gone); } request.requestContext.globalStrongWriteResponse = response; request.requestContext.globalCommittedSelectedLSN = lsn.v; request.requestContext.forceRefreshAddressCache = false; logger.debug("ConsistencyWriter: globalCommittedLsn {}, lsn {}", globalCommittedLsn, lsn); if (globalCommittedLsn.v < lsn.v) { Mono<RxDocumentServiceRequest> barrierRequestObs = BarrierRequestHelper.createAsync(request, this.authorizationTokenProvider, null, request.requestContext.globalCommittedSelectedLSN); return barrierRequestObs.flatMap(barrierRequest -> { 
Mono<Boolean> barrierWait = this.waitForWriteBarrierAsync(barrierRequest, request.requestContext.globalCommittedSelectedLSN); return barrierWait.flatMap(res -> { if (!res) { logger.error("ConsistencyWriter: Write barrier has not been met for global strong request. SelectedGlobalCommittedLsn: {}", request.requestContext.globalCommittedSelectedLSN); return Mono.error(new GoneException(RMResources.GlobalStrongWriteBarrierNotMet)); } return Mono.just(request.requestContext.globalStrongWriteResponse); }); }); } else { return Mono.just(request.requestContext.globalStrongWriteResponse); } } else { return Mono.just(response); } } catch (CosmosClientException e) { return Mono.error(e); } } static void getLsnAndGlobalCommittedLsn(StoreResponse response, Utils.ValueHolder<Long> lsn, Utils.ValueHolder<Long> globalCommittedLsn) { lsn.v = -1L; globalCommittedLsn.v = -1L; String headerValue; if ((headerValue = response.getHeaderValue(WFConstants.BackendHeaders.LSN)) != null) { lsn.v = Long.parseLong(headerValue); } if ((headerValue = response.getHeaderValue(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN)) != null) { globalCommittedLsn.v = Long.parseLong(headerValue); } } void startBackgroundAddressRefresh(RxDocumentServiceRequest request) { this.addressSelector.resolvePrimaryUriAsync(request, true) .publishOn(Schedulers.elastic()) .subscribe( r -> { }, e -> logger.warn( "Background refresh of the primary address failed with {}", e.getMessage(), e) ); } }
class ConsistencyWriter { private final static int MAX_NUMBER_OF_WRITE_BARRIER_READ_RETRIES = 30; private final static int DELAY_BETWEEN_WRITE_BARRIER_CALLS_IN_MS = 30; private final static int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4; private final static int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10; private final Logger logger = LoggerFactory.getLogger(ConsistencyWriter.class); private final TransportClient transportClient; private final AddressSelector addressSelector; private final ISessionContainer sessionContainer; private final IAuthorizationTokenProvider authorizationTokenProvider; private final boolean useMultipleWriteLocations; private final GatewayServiceConfigurationReader serviceConfigReader; private final StoreReader storeReader; public ConsistencyWriter( AddressSelector addressSelector, ISessionContainer sessionContainer, TransportClient transportClient, IAuthorizationTokenProvider authorizationTokenProvider, GatewayServiceConfigurationReader serviceConfigReader, boolean useMultipleWriteLocations) { this.transportClient = transportClient; this.addressSelector = addressSelector; this.sessionContainer = sessionContainer; this.authorizationTokenProvider = authorizationTokenProvider; this.useMultipleWriteLocations = useMultipleWriteLocations; this.serviceConfigReader = serviceConfigReader; this.storeReader = new StoreReader(transportClient, addressSelector, null /*we need store reader only for global strong, no session is needed*/); } public Mono<StoreResponse> writeAsync( RxDocumentServiceRequest entity, TimeoutHelper timeout, boolean forceRefresh) { if (timeout.isElapsed()) { return Mono.error(new RequestTimeoutException()); } String sessionToken = entity.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); return this.writePrivateAsync(entity, timeout, forceRefresh).doOnEach( arg -> { try { SessionTokenHelper.setOriginalSessionToken(entity, sessionToken); } catch (Throwable throwable) { logger.error("Unexpected failure in 
handling orig [{}]: new [{}]", arg, throwable.getMessage(), throwable); } } ); } Mono<StoreResponse> writePrivateAsync( RxDocumentServiceRequest request, TimeoutHelper timeout, boolean forceRefresh) { if (timeout.isElapsed()) { return Mono.error(new RequestTimeoutException()); } request.requestContext.timeoutHelper = timeout; if (request.requestContext.requestChargeTracker == null) { request.requestContext.requestChargeTracker = new RequestChargeTracker(); } if (request.requestContext.cosmosResponseDiagnostics == null) { request.requestContext.cosmosResponseDiagnostics = BridgeInternal.createCosmosResponseDiagnostics(); } request.requestContext.forceRefreshAddressCache = forceRefresh; if (request.requestContext.globalStrongWriteResponse == null) { Mono<List<AddressInformation>> replicaAddressesObs = this.addressSelector.resolveAddressesAsync(request, forceRefresh); AtomicReference<URI> primaryURI = new AtomicReference<>(); return replicaAddressesObs.flatMap(replicaAddresses -> { try { List<URI> contactedReplicas = new ArrayList<>(); replicaAddresses.forEach(replicaAddress -> contactedReplicas.add(HttpUtils.toURI(replicaAddress.getPhysicalUri()))); BridgeInternal.setContactedReplicas(request.requestContext.cosmosResponseDiagnostics, contactedReplicas); return Mono.just(AddressSelector.getPrimaryUri(request, replicaAddresses)); } catch (GoneException e) { return Mono.error(e); } }).flatMap(primaryUri -> { try { primaryURI.set(primaryUri); if (this.useMultipleWriteLocations && RequestHelper.GetConsistencyLevelToUse(this.serviceConfigReader, request) == ConsistencyLevel.SESSION) { SessionTokenHelper.setPartitionLocalSessionToken(request, this.sessionContainer); } else { SessionTokenHelper.validateAndRemoveSessionToken(request); } } catch (Exception e) { return Mono.error(e); } return this.transportClient.invokeResourceOperationAsync(primaryUri, request) .doOnError( t -> { try { Throwable unwrappedException = Exceptions.unwrap(t); CosmosClientException ex = 
Utils.as(unwrappedException, CosmosClientException.class); try { BridgeInternal.recordResponse(request.requestContext.cosmosResponseDiagnostics, request, storeReader.createStoreResult(null, ex, false, false, primaryUri)); } catch (Exception e) { logger.error("Error occurred while recording response", e); } String value = ex.responseHeaders().get(HttpConstants.HttpHeaders.WRITE_REQUEST_TRIGGER_ADDRESS_REFRESH); if (!Strings.isNullOrWhiteSpace(value)) { Integer result = Integers.tryParse(value); if (result != null && result == 1) { startBackgroundAddressRefresh(request); } } } catch (Throwable throwable) { logger.error("Unexpected failure in handling orig [{}]", t.getMessage(), t); logger.error("Unexpected failure in handling orig [{}] : new [{}]", t.getMessage(), throwable.getMessage(), throwable); } } ); }).flatMap(response -> { try { BridgeInternal.recordResponse(request.requestContext.cosmosResponseDiagnostics, request, storeReader.createStoreResult(response, null, false, false, primaryURI.get())); } catch (Exception e) { logger.error("Error occurred while recording response", e); } return barrierForGlobalStrong(request, response); }); } else { Mono<RxDocumentServiceRequest> barrierRequestObs = BarrierRequestHelper.createAsync(request, this.authorizationTokenProvider, null, request.requestContext.globalCommittedSelectedLSN); return barrierRequestObs.flatMap(barrierRequest -> waitForWriteBarrierAsync(barrierRequest, request.requestContext.globalCommittedSelectedLSN) .flatMap(v -> { if (!v) { logger.warn("ConsistencyWriter: Write barrier has not been met for global strong request. 
SelectedGlobalCommittedLsn: {}", request.requestContext.globalCommittedSelectedLSN); return Mono.error(new GoneException(RMResources.GlobalStrongWriteBarrierNotMet)); } return Mono.just(request); })).map(req -> req.requestContext.globalStrongWriteResponse); } } boolean isGlobalStrongRequest(RxDocumentServiceRequest request, StoreResponse response) { if (this.serviceConfigReader.getDefaultConsistencyLevel() == ConsistencyLevel.STRONG) { int numberOfReadRegions = -1; String headerValue = null; if ((headerValue = response.getHeaderValue(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS)) != null) { numberOfReadRegions = Integer.parseInt(headerValue); } if (numberOfReadRegions > 0 && this.serviceConfigReader.getDefaultConsistencyLevel() == ConsistencyLevel.STRONG) { return true; } } return false; } Mono<StoreResponse> barrierForGlobalStrong(RxDocumentServiceRequest request, StoreResponse response) { try { if (ReplicatedResourceClient.isGlobalStrongEnabled() && this.isGlobalStrongRequest(request, response)) { Utils.ValueHolder<Long> lsn = Utils.ValueHolder.initialize(-1l); Utils.ValueHolder<Long> globalCommittedLsn = Utils.ValueHolder.initialize(-1l); getLsnAndGlobalCommittedLsn(response, lsn, globalCommittedLsn); if (lsn.v == -1 || globalCommittedLsn.v == -1) { logger.error("ConsistencyWriter: lsn {} or GlobalCommittedLsn {} is not set for global strong request", lsn, globalCommittedLsn); throw new GoneException(RMResources.Gone); } request.requestContext.globalStrongWriteResponse = response; request.requestContext.globalCommittedSelectedLSN = lsn.v; request.requestContext.forceRefreshAddressCache = false; logger.debug("ConsistencyWriter: globalCommittedLsn {}, lsn {}", globalCommittedLsn, lsn); if (globalCommittedLsn.v < lsn.v) { Mono<RxDocumentServiceRequest> barrierRequestObs = BarrierRequestHelper.createAsync(request, this.authorizationTokenProvider, null, request.requestContext.globalCommittedSelectedLSN); return barrierRequestObs.flatMap(barrierRequest -> { 
Mono<Boolean> barrierWait = this.waitForWriteBarrierAsync(barrierRequest, request.requestContext.globalCommittedSelectedLSN); return barrierWait.flatMap(res -> { if (!res) { logger.error("ConsistencyWriter: Write barrier has not been met for global strong request. SelectedGlobalCommittedLsn: {}", request.requestContext.globalCommittedSelectedLSN); return Mono.error(new GoneException(RMResources.GlobalStrongWriteBarrierNotMet)); } return Mono.just(request.requestContext.globalStrongWriteResponse); }); }); } else { return Mono.just(request.requestContext.globalStrongWriteResponse); } } else { return Mono.just(response); } } catch (CosmosClientException e) { return Mono.error(e); } } static void getLsnAndGlobalCommittedLsn(StoreResponse response, Utils.ValueHolder<Long> lsn, Utils.ValueHolder<Long> globalCommittedLsn) { lsn.v = -1L; globalCommittedLsn.v = -1L; String headerValue; if ((headerValue = response.getHeaderValue(WFConstants.BackendHeaders.LSN)) != null) { lsn.v = Long.parseLong(headerValue); } if ((headerValue = response.getHeaderValue(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN)) != null) { globalCommittedLsn.v = Long.parseLong(headerValue); } } void startBackgroundAddressRefresh(RxDocumentServiceRequest request) { this.addressSelector.resolvePrimaryUriAsync(request, true) .publishOn(Schedulers.elastic()) .subscribe( r -> { }, e -> logger.warn( "Background refresh of the primary address failed with {}", e.getMessage(), e) ); } }
I think the toUrl method may also need to be updated. It looks like it always sets the container right after the host and never considers if it needs to set the account name?
public static BlobUrlParts parse(URL url) { BlobUrlParts parts = new BlobUrlParts().setScheme(url.getProtocol()); if (ModelHelper.IP_V4_URL_PATTERN.matcher(url.getHost()).find()) { parseIpUrl(url, parts); } else { parseNonIpUrl(url, parts); } Map<String, String[]> queryParamsMap = parseQueryString(url.getQuery()); String[] snapshotArray = queryParamsMap.remove("snapshot"); if (snapshotArray != null) { parts.setSnapshot(snapshotArray[0]); } BlobServiceSasQueryParameters blobServiceSasQueryParameters = new BlobServiceSasQueryParameters(queryParamsMap, true); return parts.setSasQueryParameters(blobServiceSasQueryParameters) .setUnparsedParameters(queryParamsMap); }
if (ModelHelper.IP_V4_URL_PATTERN.matcher(url.getHost()).find()) {
public static BlobUrlParts parse(URL url) { BlobUrlParts parts = new BlobUrlParts().setScheme(url.getProtocol()); if (ModelHelper.IP_V4_URL_PATTERN.matcher(url.getHost()).find()) { parseIpUrl(url, parts); } else { parseNonIpUrl(url, parts); } Map<String, String[]> queryParamsMap = parseQueryString(url.getQuery()); String[] snapshotArray = queryParamsMap.remove("snapshot"); if (snapshotArray != null) { parts.setSnapshot(snapshotArray[0]); } BlobServiceSasQueryParameters blobServiceSasQueryParameters = new BlobServiceSasQueryParameters(queryParamsMap, true); return parts.setSasQueryParameters(blobServiceSasQueryParameters) .setUnparsedParameters(queryParamsMap); }
class BlobUrlParts { private final ClientLogger logger = new ClientLogger(BlobUrlParts.class); private String scheme; private String host; private String containerName; private String blobName; private String snapshot; private String accountName; private BlobServiceSasQueryParameters blobServiceSasQueryParameters; private Map<String, String[]> unparsedParameters; /** * Initializes a BlobUrlParts object which helps aid in the construction of a Blob Storage URL. */ public BlobUrlParts() { unparsedParameters = new HashMap<>(); } /** * Gets the accountname, ex. "myaccountname". * * @return the account name. */ public String getAccountName() { return accountName; } /** * Sets the account name. * * @param accountName The account name. * @return the updated BlobURLParts object. */ public BlobUrlParts setAccountName(String accountName) { this.accountName = accountName; return this; } /** * Gets the URL scheme, ex. "https: * * @return the URL scheme. */ public String getScheme() { return scheme; } /** * Sets the URL scheme, ex. "https: * * @param scheme The URL scheme. * @return the updated BlobUrlParts object. */ public BlobUrlParts setScheme(String scheme) { this.scheme = scheme; return this; } /** * Gets the URL host, ex. "account.blob.core.windows.net". * * @return the URL host. */ public String getHost() { return host; } /** * Sets the URL host, ex. "account.blob.core.windows.net". * * @param host The URL host. * @return the updated BlobUrlParts object. */ public BlobUrlParts setHost(String host) { this.host = host; return this; } /** * Gets the container name that will be used as part of the URL path. * * @return the container name. */ public String getBlobContainerName() { return containerName; } /** * Sets the container name that will be used as part of the URL path. * * @param containerName The container nme. * @return the updated BlobUrlParts object. 
*/ public BlobUrlParts setContainerName(String containerName) { this.containerName = containerName; return this; } /** * Decodes and gets the blob name that will be used as part of the URL path. * * @return the decoded blob name. */ public String getBlobName() { return (blobName == null) ? null : Utility.urlDecode(blobName); } /** * Sets the blob name that will be used as part of the URL path. * * @param blobName The blob name. * @return the updated BlobUrlParts object. */ public BlobUrlParts setBlobName(String blobName) { this.blobName = Utility.urlEncode(Utility.urlDecode(blobName)); return this; } /** * Gets the snapshot identifier that will be used as part of the query string if set. * * @return the snapshot identifier. */ public String getSnapshot() { return snapshot; } /** * Sets the snapshot identifier that will be used as part of the query string if set. * * @param snapshot The snapshot identifier. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSnapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Gets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @return the {@link BlobServiceSasQueryParameters} of the URL */ public BlobServiceSasQueryParameters getSasQueryParameters() { return blobServiceSasQueryParameters; } /** * Sets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @param blobServiceSasQueryParameters The SAS query parameters. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSasQueryParameters(BlobServiceSasQueryParameters blobServiceSasQueryParameters) { this.blobServiceSasQueryParameters = blobServiceSasQueryParameters; return this; } /** * Gets the query string parameters that aren't part of the SAS token that will be used by this URL. * * @return the non-SAS token query string values. 
*/ public Map<String, String[]> getUnparsedParameters() { return unparsedParameters; } /** * Sets the query string parameters that aren't part of the SAS token that will be used by this URL. * * @param unparsedParameters The non-SAS token query string values. * @return the updated BlobUrlParts object. */ public BlobUrlParts setUnparsedParameters(Map<String, String[]> unparsedParameters) { this.unparsedParameters = unparsedParameters; return this; } /** * Converts the blob URL parts to a {@link URL}. * * @return A {@code URL} to the blob resource composed of all the elements in this object. * @throws IllegalStateException The fields present on the BlobUrlParts object were insufficient to construct a * valid URL or were ill-formatted. */ public URL toUrl() { UrlBuilder url = new UrlBuilder().setScheme(this.scheme).setHost(this.host); StringBuilder path = new StringBuilder(); if ((this.containerName == null || this.containerName.isEmpty()) && this.blobName != null) { this.containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } if (this.containerName != null) { path.append(this.containerName); if (this.blobName != null) { path.append('/'); path.append(this.blobName); } } url.setPath(path.toString()); if (this.snapshot != null) { url.setQueryParameter(Constants.UrlConstants.SNAPSHOT_QUERY_PARAMETER, this.snapshot); } if (this.blobServiceSasQueryParameters != null) { String encodedSAS = this.blobServiceSasQueryParameters.encode(); if (encodedSAS.length() != 0) { url.setQuery(encodedSAS); } } for (Map.Entry<String, String[]> entry : this.unparsedParameters.entrySet()) { url.setQueryParameter(entry.getKey(), Utility.urlEncode(String.join(",", entry.getValue()))); } try { return url.toURL(); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalStateException("The URL parts created a malformed URL.", ex)); } } /** * Parses a string into a BlobUrlParts. 
* * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. * @throws IllegalArgumentException If {@code url} is a malformed {@link URL}. */ public static BlobUrlParts parse(String url) { try { return parse(new URL(url)); } catch (MalformedURLException e) { throw new IllegalArgumentException("Invalid URL format. URL: " + url); } } /** * Parses an existing URL into a BlobUrlParts. * * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. */ /* * Parse the IP url into its host, account name, container name, and blob name. 
*/ private static void parseIpUrl(URL url, BlobUrlParts parts) { parts.setHost(url.getAuthority()); String path = url.getPath(); if (path.charAt(0) == '/') { path = path.substring(1); } String[] pathPieces = path.split("/", 3); parts.setAccountName(pathPieces[0]); if (pathPieces.length >= 3) { parts.setContainerName(pathPieces[1]); parts.setBlobName(pathPieces[2]); } else if (pathPieces.length == 2) { parts.setContainerName(pathPieces[1]); } } /* * Parse the non-IP url into its host, account name, container name, and blob name. */ private static void parseNonIpUrl(URL url, BlobUrlParts parts) { String host = url.getHost(); parts.setHost(host); if (!ImplUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { parts.setAccountName(host); } else { parts.setAccountName(host.substring(0, accountNameIndex)); } } String path = url.getPath(); if (!ImplUtils.isNullOrEmpty(path)) { if (path.charAt(0) == '/') { path = path.substring(1); } int containerEndIndex = path.indexOf('/'); if (containerEndIndex == -1) { parts.setContainerName(path); } else { parts.setContainerName(path.substring(0, containerEndIndex)); parts.setBlobName(path.substring(containerEndIndex + 1)); } } } /** * Parses a query string into a one to many hashmap. * * @param queryParams The string of query params to parse. * @return A {@code HashMap<String, String[]>} of the key values. 
*/ private static TreeMap<String, String[]> parseQueryString(String queryParams) { final TreeMap<String, String[]> retVals = new TreeMap<>(Comparator.naturalOrder()); if (ImplUtils.isNullOrEmpty(queryParams)) { return retVals; } final String[] valuePairs = queryParams.split("&"); for (String valuePair : valuePairs) { final int equalDex = valuePair.indexOf("="); String key = Utility.urlDecode(valuePair.substring(0, equalDex)).toLowerCase(Locale.ROOT); String value = Utility.urlDecode(valuePair.substring(equalDex + 1)); String[] keyValues = retVals.get(key); if (keyValues == null) { keyValues = new String[]{value}; } else { final String[] newValues = new String[keyValues.length + 1]; System.arraycopy(keyValues, 0, newValues, 0, keyValues.length); newValues[newValues.length - 1] = value; keyValues = newValues; } retVals.put(key, keyValues); } return retVals; } }
class BlobUrlParts { private final ClientLogger logger = new ClientLogger(BlobUrlParts.class); private String scheme; private String host; private String containerName; private String blobName; private String snapshot; private String accountName; private boolean isIpUrl; private BlobServiceSasQueryParameters blobServiceSasQueryParameters; private Map<String, String[]> unparsedParameters; /** * Initializes a BlobUrlParts object which helps aid in the construction of a Blob Storage URL. */ public BlobUrlParts() { unparsedParameters = new HashMap<>(); } /** * Gets the accountname, ex. "myaccountname". * * @return the account name. */ public String getAccountName() { return accountName; } /** * Sets the account name. * * @param accountName The account name. * @return the updated BlobURLParts object. */ public BlobUrlParts setAccountName(String accountName) { this.accountName = accountName; return this; } /** * Gets the URL scheme, ex. "https: * * @return the URL scheme. */ public String getScheme() { return scheme; } /** * Sets the URL scheme, ex. "https: * * @param scheme The URL scheme. * @return the updated BlobUrlParts object. */ public BlobUrlParts setScheme(String scheme) { this.scheme = scheme; return this; } /** * Gets the URL host, ex. "account.blob.core.windows.net" or "127.0.0.1:10000". * * @return the URL host. */ public String getHost() { return host; } /** * Sets the URL host, ex. "account.blob.core.windows.net" or "127.0.0.1:10000". * * @param host The URL host. * @return the updated BlobUrlParts object. */ public BlobUrlParts setHost(String host) { this.host = host; this.isIpUrl = ModelHelper.IP_V4_URL_PATTERN.matcher(host).find(); return this; } /** * Gets the container name that will be used as part of the URL path. * * @return the container name. */ public String getBlobContainerName() { return containerName; } /** * Sets the container name that will be used as part of the URL path. * * @param containerName The container nme. 
* @return the updated BlobUrlParts object. */ public BlobUrlParts setContainerName(String containerName) { this.containerName = containerName; return this; } /** * Decodes and gets the blob name that will be used as part of the URL path. * * @return the decoded blob name. */ public String getBlobName() { return (blobName == null) ? null : Utility.urlDecode(blobName); } /** * Sets the blob name that will be used as part of the URL path. * * @param blobName The blob name. * @return the updated BlobUrlParts object. */ public BlobUrlParts setBlobName(String blobName) { this.blobName = Utility.urlEncode(Utility.urlDecode(blobName)); return this; } /** * Gets the snapshot identifier that will be used as part of the query string if set. * * @return the snapshot identifier. */ public String getSnapshot() { return snapshot; } /** * Sets the snapshot identifier that will be used as part of the query string if set. * * @param snapshot The snapshot identifier. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSnapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Gets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @return the {@link BlobServiceSasQueryParameters} of the URL */ public BlobServiceSasQueryParameters getSasQueryParameters() { return blobServiceSasQueryParameters; } /** * Sets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @param blobServiceSasQueryParameters The SAS query parameters. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSasQueryParameters(BlobServiceSasQueryParameters blobServiceSasQueryParameters) { this.blobServiceSasQueryParameters = blobServiceSasQueryParameters; return this; } /** * Gets the query string parameters that aren't part of the SAS token that will be used by this URL. 
* * @return the non-SAS token query string values. */ public Map<String, String[]> getUnparsedParameters() { return unparsedParameters; } /** * Sets the query string parameters that aren't part of the SAS token that will be used by this URL. * * @param unparsedParameters The non-SAS token query string values. * @return the updated BlobUrlParts object. */ public BlobUrlParts setUnparsedParameters(Map<String, String[]> unparsedParameters) { this.unparsedParameters = unparsedParameters; return this; } /** * Converts the blob URL parts to a {@link URL}. * * @return A {@code URL} to the blob resource composed of all the elements in this object. * @throws IllegalStateException The fields present on the BlobUrlParts object were insufficient to construct a * valid URL or were ill-formatted. */ public URL toUrl() { UrlBuilder url = new UrlBuilder().setScheme(this.scheme).setHost(this.host); StringBuilder path = new StringBuilder(); if (ImplUtils.isNullOrEmpty(this.containerName) && this.blobName != null) { this.containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } if (this.isIpUrl) { path.append(this.accountName); } if (this.containerName != null) { path.append("/").append(this.containerName); if (this.blobName != null) { path.append("/").append(this.blobName); } } url.setPath(path.toString()); if (this.snapshot != null) { url.setQueryParameter(Constants.UrlConstants.SNAPSHOT_QUERY_PARAMETER, this.snapshot); } if (this.blobServiceSasQueryParameters != null) { String encodedSAS = this.blobServiceSasQueryParameters.encode(); if (encodedSAS.length() != 0) { url.setQuery(encodedSAS); } } for (Map.Entry<String, String[]> entry : this.unparsedParameters.entrySet()) { url.setQueryParameter(entry.getKey(), Utility.urlEncode(String.join(",", entry.getValue()))); } try { return url.toURL(); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalStateException("The URL parts created a malformed URL.", ex)); } } /** * Parses a string into a 
BlobUrlParts. * * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. * @throws IllegalArgumentException If {@code url} is a malformed {@link URL}. */ public static BlobUrlParts parse(String url) { try { return parse(new URL(url)); } catch (MalformedURLException e) { throw new IllegalArgumentException("Invalid URL format. URL: " + url); } } /** * Parses an existing URL into a BlobUrlParts. * * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. */ /* * Parse the IP url into its host, account name, container name, and blob name. 
*/ private static void parseIpUrl(URL url, BlobUrlParts parts) { parts.setHost(url.getAuthority()); String path = url.getPath(); if (path.charAt(0) == '/') { path = path.substring(1); } String[] pathPieces = path.split("/", 3); parts.setAccountName(pathPieces[0]); if (pathPieces.length >= 3) { parts.setContainerName(pathPieces[1]); parts.setBlobName(pathPieces[2]); } else if (pathPieces.length == 2) { parts.setContainerName(pathPieces[1]); } parts.isIpUrl = true; } /* * Parse the non-IP url into its host, account name, container name, and blob name. */ private static void parseNonIpUrl(URL url, BlobUrlParts parts) { String host = url.getHost(); parts.setHost(host); if (!ImplUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { parts.setAccountName(host); } else { parts.setAccountName(host.substring(0, accountNameIndex)); } } String path = url.getPath(); if (!ImplUtils.isNullOrEmpty(path)) { if (path.charAt(0) == '/') { path = path.substring(1); } int containerEndIndex = path.indexOf('/'); if (containerEndIndex == -1) { parts.setContainerName(path); } else { parts.setContainerName(path.substring(0, containerEndIndex)); parts.setBlobName(path.substring(containerEndIndex + 1)); } } parts.isIpUrl = false; } /** * Parses a query string into a one to many hashmap. * * @param queryParams The string of query params to parse. * @return A {@code HashMap<String, String[]>} of the key values. 
*/ private static TreeMap<String, String[]> parseQueryString(String queryParams) { final TreeMap<String, String[]> retVals = new TreeMap<>(Comparator.naturalOrder()); if (ImplUtils.isNullOrEmpty(queryParams)) { return retVals; } final String[] valuePairs = queryParams.split("&"); for (String valuePair : valuePairs) { final int equalDex = valuePair.indexOf("="); String key = Utility.urlDecode(valuePair.substring(0, equalDex)).toLowerCase(Locale.ROOT); String value = Utility.urlDecode(valuePair.substring(equalDex + 1)); String[] keyValues = retVals.get(key); if (keyValues == null) { keyValues = new String[]{value}; } else { final String[] newValues = new String[keyValues.length + 1]; System.arraycopy(keyValues, 0, newValues, 0, keyValues.length); newValues[newValues.length - 1] = value; keyValues = newValues; } retVals.put(key, keyValues); } return retVals; } }
Updated
public static BlobUrlParts parse(URL url) { BlobUrlParts parts = new BlobUrlParts().setScheme(url.getProtocol()); if (ModelHelper.IP_V4_URL_PATTERN.matcher(url.getHost()).find()) { parseIpUrl(url, parts); } else { parseNonIpUrl(url, parts); } Map<String, String[]> queryParamsMap = parseQueryString(url.getQuery()); String[] snapshotArray = queryParamsMap.remove("snapshot"); if (snapshotArray != null) { parts.setSnapshot(snapshotArray[0]); } BlobServiceSasQueryParameters blobServiceSasQueryParameters = new BlobServiceSasQueryParameters(queryParamsMap, true); return parts.setSasQueryParameters(blobServiceSasQueryParameters) .setUnparsedParameters(queryParamsMap); }
if (ModelHelper.IP_V4_URL_PATTERN.matcher(url.getHost()).find()) {
public static BlobUrlParts parse(URL url) { BlobUrlParts parts = new BlobUrlParts().setScheme(url.getProtocol()); if (ModelHelper.IP_V4_URL_PATTERN.matcher(url.getHost()).find()) { parseIpUrl(url, parts); } else { parseNonIpUrl(url, parts); } Map<String, String[]> queryParamsMap = parseQueryString(url.getQuery()); String[] snapshotArray = queryParamsMap.remove("snapshot"); if (snapshotArray != null) { parts.setSnapshot(snapshotArray[0]); } BlobServiceSasQueryParameters blobServiceSasQueryParameters = new BlobServiceSasQueryParameters(queryParamsMap, true); return parts.setSasQueryParameters(blobServiceSasQueryParameters) .setUnparsedParameters(queryParamsMap); }
class BlobUrlParts { private final ClientLogger logger = new ClientLogger(BlobUrlParts.class); private String scheme; private String host; private String containerName; private String blobName; private String snapshot; private String accountName; private BlobServiceSasQueryParameters blobServiceSasQueryParameters; private Map<String, String[]> unparsedParameters; /** * Initializes a BlobUrlParts object which helps aid in the construction of a Blob Storage URL. */ public BlobUrlParts() { unparsedParameters = new HashMap<>(); } /** * Gets the accountname, ex. "myaccountname". * * @return the account name. */ public String getAccountName() { return accountName; } /** * Sets the account name. * * @param accountName The account name. * @return the updated BlobURLParts object. */ public BlobUrlParts setAccountName(String accountName) { this.accountName = accountName; return this; } /** * Gets the URL scheme, ex. "https: * * @return the URL scheme. */ public String getScheme() { return scheme; } /** * Sets the URL scheme, ex. "https: * * @param scheme The URL scheme. * @return the updated BlobUrlParts object. */ public BlobUrlParts setScheme(String scheme) { this.scheme = scheme; return this; } /** * Gets the URL host, ex. "account.blob.core.windows.net". * * @return the URL host. */ public String getHost() { return host; } /** * Sets the URL host, ex. "account.blob.core.windows.net". * * @param host The URL host. * @return the updated BlobUrlParts object. */ public BlobUrlParts setHost(String host) { this.host = host; return this; } /** * Gets the container name that will be used as part of the URL path. * * @return the container name. */ public String getBlobContainerName() { return containerName; } /** * Sets the container name that will be used as part of the URL path. * * @param containerName The container nme. * @return the updated BlobUrlParts object. 
*/ public BlobUrlParts setContainerName(String containerName) { this.containerName = containerName; return this; } /** * Decodes and gets the blob name that will be used as part of the URL path. * * @return the decoded blob name. */ public String getBlobName() { return (blobName == null) ? null : Utility.urlDecode(blobName); } /** * Sets the blob name that will be used as part of the URL path. * * @param blobName The blob name. * @return the updated BlobUrlParts object. */ public BlobUrlParts setBlobName(String blobName) { this.blobName = Utility.urlEncode(Utility.urlDecode(blobName)); return this; } /** * Gets the snapshot identifier that will be used as part of the query string if set. * * @return the snapshot identifier. */ public String getSnapshot() { return snapshot; } /** * Sets the snapshot identifier that will be used as part of the query string if set. * * @param snapshot The snapshot identifier. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSnapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Gets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @return the {@link BlobServiceSasQueryParameters} of the URL */ public BlobServiceSasQueryParameters getSasQueryParameters() { return blobServiceSasQueryParameters; } /** * Sets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @param blobServiceSasQueryParameters The SAS query parameters. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSasQueryParameters(BlobServiceSasQueryParameters blobServiceSasQueryParameters) { this.blobServiceSasQueryParameters = blobServiceSasQueryParameters; return this; } /** * Gets the query string parameters that aren't part of the SAS token that will be used by this URL. * * @return the non-SAS token query string values. 
*/ public Map<String, String[]> getUnparsedParameters() { return unparsedParameters; } /** * Sets the query string parameters that aren't part of the SAS token that will be used by this URL. * * @param unparsedParameters The non-SAS token query string values. * @return the updated BlobUrlParts object. */ public BlobUrlParts setUnparsedParameters(Map<String, String[]> unparsedParameters) { this.unparsedParameters = unparsedParameters; return this; } /** * Converts the blob URL parts to a {@link URL}. * * @return A {@code URL} to the blob resource composed of all the elements in this object. * @throws IllegalStateException The fields present on the BlobUrlParts object were insufficient to construct a * valid URL or were ill-formatted. */ public URL toUrl() { UrlBuilder url = new UrlBuilder().setScheme(this.scheme).setHost(this.host); StringBuilder path = new StringBuilder(); if ((this.containerName == null || this.containerName.isEmpty()) && this.blobName != null) { this.containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } if (this.containerName != null) { path.append(this.containerName); if (this.blobName != null) { path.append('/'); path.append(this.blobName); } } url.setPath(path.toString()); if (this.snapshot != null) { url.setQueryParameter(Constants.UrlConstants.SNAPSHOT_QUERY_PARAMETER, this.snapshot); } if (this.blobServiceSasQueryParameters != null) { String encodedSAS = this.blobServiceSasQueryParameters.encode(); if (encodedSAS.length() != 0) { url.setQuery(encodedSAS); } } for (Map.Entry<String, String[]> entry : this.unparsedParameters.entrySet()) { url.setQueryParameter(entry.getKey(), Utility.urlEncode(String.join(",", entry.getValue()))); } try { return url.toURL(); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalStateException("The URL parts created a malformed URL.", ex)); } } /** * Parses a string into a BlobUrlParts. 
* * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. * @throws IllegalArgumentException If {@code url} is a malformed {@link URL}. */ public static BlobUrlParts parse(String url) { try { return parse(new URL(url)); } catch (MalformedURLException e) { throw new IllegalArgumentException("Invalid URL format. URL: " + url); } } /** * Parses an existing URL into a BlobUrlParts. * * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. */ /* * Parse the IP url into its host, account name, container name, and blob name. 
*/ private static void parseIpUrl(URL url, BlobUrlParts parts) { parts.setHost(url.getAuthority()); String path = url.getPath(); if (path.charAt(0) == '/') { path = path.substring(1); } String[] pathPieces = path.split("/", 3); parts.setAccountName(pathPieces[0]); if (pathPieces.length >= 3) { parts.setContainerName(pathPieces[1]); parts.setBlobName(pathPieces[2]); } else if (pathPieces.length == 2) { parts.setContainerName(pathPieces[1]); } } /* * Parse the non-IP url into its host, account name, container name, and blob name. */ private static void parseNonIpUrl(URL url, BlobUrlParts parts) { String host = url.getHost(); parts.setHost(host); if (!ImplUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { parts.setAccountName(host); } else { parts.setAccountName(host.substring(0, accountNameIndex)); } } String path = url.getPath(); if (!ImplUtils.isNullOrEmpty(path)) { if (path.charAt(0) == '/') { path = path.substring(1); } int containerEndIndex = path.indexOf('/'); if (containerEndIndex == -1) { parts.setContainerName(path); } else { parts.setContainerName(path.substring(0, containerEndIndex)); parts.setBlobName(path.substring(containerEndIndex + 1)); } } } /** * Parses a query string into a one to many hashmap. * * @param queryParams The string of query params to parse. * @return A {@code HashMap<String, String[]>} of the key values. 
*/ private static TreeMap<String, String[]> parseQueryString(String queryParams) { final TreeMap<String, String[]> retVals = new TreeMap<>(Comparator.naturalOrder()); if (ImplUtils.isNullOrEmpty(queryParams)) { return retVals; } final String[] valuePairs = queryParams.split("&"); for (String valuePair : valuePairs) { final int equalDex = valuePair.indexOf("="); String key = Utility.urlDecode(valuePair.substring(0, equalDex)).toLowerCase(Locale.ROOT); String value = Utility.urlDecode(valuePair.substring(equalDex + 1)); String[] keyValues = retVals.get(key); if (keyValues == null) { keyValues = new String[]{value}; } else { final String[] newValues = new String[keyValues.length + 1]; System.arraycopy(keyValues, 0, newValues, 0, keyValues.length); newValues[newValues.length - 1] = value; keyValues = newValues; } retVals.put(key, keyValues); } return retVals; } }
class BlobUrlParts { private final ClientLogger logger = new ClientLogger(BlobUrlParts.class); private String scheme; private String host; private String containerName; private String blobName; private String snapshot; private String accountName; private boolean isIpUrl; private BlobServiceSasQueryParameters blobServiceSasQueryParameters; private Map<String, String[]> unparsedParameters; /** * Initializes a BlobUrlParts object which helps aid in the construction of a Blob Storage URL. */ public BlobUrlParts() { unparsedParameters = new HashMap<>(); } /** * Gets the accountname, ex. "myaccountname". * * @return the account name. */ public String getAccountName() { return accountName; } /** * Sets the account name. * * @param accountName The account name. * @return the updated BlobURLParts object. */ public BlobUrlParts setAccountName(String accountName) { this.accountName = accountName; return this; } /** * Gets the URL scheme, ex. "https: * * @return the URL scheme. */ public String getScheme() { return scheme; } /** * Sets the URL scheme, ex. "https: * * @param scheme The URL scheme. * @return the updated BlobUrlParts object. */ public BlobUrlParts setScheme(String scheme) { this.scheme = scheme; return this; } /** * Gets the URL host, ex. "account.blob.core.windows.net" or "127.0.0.1:10000". * * @return the URL host. */ public String getHost() { return host; } /** * Sets the URL host, ex. "account.blob.core.windows.net" or "127.0.0.1:10000". * * @param host The URL host. * @return the updated BlobUrlParts object. */ public BlobUrlParts setHost(String host) { this.host = host; this.isIpUrl = ModelHelper.IP_V4_URL_PATTERN.matcher(host).find(); return this; } /** * Gets the container name that will be used as part of the URL path. * * @return the container name. */ public String getBlobContainerName() { return containerName; } /** * Sets the container name that will be used as part of the URL path. * * @param containerName The container nme. 
* @return the updated BlobUrlParts object. */ public BlobUrlParts setContainerName(String containerName) { this.containerName = containerName; return this; } /** * Decodes and gets the blob name that will be used as part of the URL path. * * @return the decoded blob name. */ public String getBlobName() { return (blobName == null) ? null : Utility.urlDecode(blobName); } /** * Sets the blob name that will be used as part of the URL path. * * @param blobName The blob name. * @return the updated BlobUrlParts object. */ public BlobUrlParts setBlobName(String blobName) { this.blobName = Utility.urlEncode(Utility.urlDecode(blobName)); return this; } /** * Gets the snapshot identifier that will be used as part of the query string if set. * * @return the snapshot identifier. */ public String getSnapshot() { return snapshot; } /** * Sets the snapshot identifier that will be used as part of the query string if set. * * @param snapshot The snapshot identifier. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSnapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Gets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @return the {@link BlobServiceSasQueryParameters} of the URL */ public BlobServiceSasQueryParameters getSasQueryParameters() { return blobServiceSasQueryParameters; } /** * Sets the {@link BlobServiceSasQueryParameters} representing the SAS query parameters that will be used to * generate the SAS token for this URL. * * @param blobServiceSasQueryParameters The SAS query parameters. * @return the updated BlobUrlParts object. */ public BlobUrlParts setSasQueryParameters(BlobServiceSasQueryParameters blobServiceSasQueryParameters) { this.blobServiceSasQueryParameters = blobServiceSasQueryParameters; return this; } /** * Gets the query string parameters that aren't part of the SAS token that will be used by this URL. 
* * @return the non-SAS token query string values. */ public Map<String, String[]> getUnparsedParameters() { return unparsedParameters; } /** * Sets the query string parameters that aren't part of the SAS token that will be used by this URL. * * @param unparsedParameters The non-SAS token query string values. * @return the updated BlobUrlParts object. */ public BlobUrlParts setUnparsedParameters(Map<String, String[]> unparsedParameters) { this.unparsedParameters = unparsedParameters; return this; } /** * Converts the blob URL parts to a {@link URL}. * * @return A {@code URL} to the blob resource composed of all the elements in this object. * @throws IllegalStateException The fields present on the BlobUrlParts object were insufficient to construct a * valid URL or were ill-formatted. */ public URL toUrl() { UrlBuilder url = new UrlBuilder().setScheme(this.scheme).setHost(this.host); StringBuilder path = new StringBuilder(); if (ImplUtils.isNullOrEmpty(this.containerName) && this.blobName != null) { this.containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } if (this.isIpUrl) { path.append(this.accountName); } if (this.containerName != null) { path.append("/").append(this.containerName); if (this.blobName != null) { path.append("/").append(this.blobName); } } url.setPath(path.toString()); if (this.snapshot != null) { url.setQueryParameter(Constants.UrlConstants.SNAPSHOT_QUERY_PARAMETER, this.snapshot); } if (this.blobServiceSasQueryParameters != null) { String encodedSAS = this.blobServiceSasQueryParameters.encode(); if (encodedSAS.length() != 0) { url.setQuery(encodedSAS); } } for (Map.Entry<String, String[]> entry : this.unparsedParameters.entrySet()) { url.setQueryParameter(entry.getKey(), Utility.urlEncode(String.join(",", entry.getValue()))); } try { return url.toURL(); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalStateException("The URL parts created a malformed URL.", ex)); } } /** * Parses a string into a 
BlobUrlParts. * * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. * @throws IllegalArgumentException If {@code url} is a malformed {@link URL}. */ public static BlobUrlParts parse(String url) { try { return parse(new URL(url)); } catch (MalformedURLException e) { throw new IllegalArgumentException("Invalid URL format. URL: " + url); } } /** * Parses an existing URL into a BlobUrlParts. * * <p>Query parameters will be parsed into two properties, {@link BlobServiceSasQueryParameters} which contains * all SAS token related values and {@link * parameters.</p> * * <p>If a URL points to a blob in the root container, and the root container is referenced implicitly, i.e. there * is no path element for the container, the name of this blob in the root container will be set as the * containerName field in the resulting {@code BlobURLParts}.</p> * * @param url The {@code URL} to be parsed. * @return A {@link BlobUrlParts} object containing all the components of a BlobURL. */ /* * Parse the IP url into its host, account name, container name, and blob name. 
*/ private static void parseIpUrl(URL url, BlobUrlParts parts) { parts.setHost(url.getAuthority()); String path = url.getPath(); if (path.charAt(0) == '/') { path = path.substring(1); } String[] pathPieces = path.split("/", 3); parts.setAccountName(pathPieces[0]); if (pathPieces.length >= 3) { parts.setContainerName(pathPieces[1]); parts.setBlobName(pathPieces[2]); } else if (pathPieces.length == 2) { parts.setContainerName(pathPieces[1]); } parts.isIpUrl = true; } /* * Parse the non-IP url into its host, account name, container name, and blob name. */ private static void parseNonIpUrl(URL url, BlobUrlParts parts) { String host = url.getHost(); parts.setHost(host); if (!ImplUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { parts.setAccountName(host); } else { parts.setAccountName(host.substring(0, accountNameIndex)); } } String path = url.getPath(); if (!ImplUtils.isNullOrEmpty(path)) { if (path.charAt(0) == '/') { path = path.substring(1); } int containerEndIndex = path.indexOf('/'); if (containerEndIndex == -1) { parts.setContainerName(path); } else { parts.setContainerName(path.substring(0, containerEndIndex)); parts.setBlobName(path.substring(containerEndIndex + 1)); } } parts.isIpUrl = false; } /** * Parses a query string into a one to many hashmap. * * @param queryParams The string of query params to parse. * @return A {@code HashMap<String, String[]>} of the key values. 
*/ private static TreeMap<String, String[]> parseQueryString(String queryParams) { final TreeMap<String, String[]> retVals = new TreeMap<>(Comparator.naturalOrder()); if (ImplUtils.isNullOrEmpty(queryParams)) { return retVals; } final String[] valuePairs = queryParams.split("&"); for (String valuePair : valuePairs) { final int equalDex = valuePair.indexOf("="); String key = Utility.urlDecode(valuePair.substring(0, equalDex)).toLowerCase(Locale.ROOT); String value = Utility.urlDecode(valuePair.substring(equalDex + 1)); String[] keyValues = retVals.get(key); if (keyValues == null) { keyValues = new String[]{value}; } else { final String[] newValues = new String[keyValues.length + 1]; System.arraycopy(keyValues, 0, newValues, 0, keyValues.length); newValues[newValues.length - 1] = value; keyValues = newValues; } retVals.put(key, keyValues); } return retVals; } }
Collections.singletonList already returns a list. Why are you wrapping it in another ArrayList?
static DocumentResultCollection<DetectLanguageResult> getExpectedBatchDetectedLanguages() { DetectedLanguage detectedLanguage1 = new DetectedLanguage().setName("English").setIso6391Name("en") .setScore(1.0); DetectedLanguage detectedLanguage2 = new DetectedLanguage().setName("Spanish").setIso6391Name("es") .setScore(1.0); DetectedLanguage detectedLanguage3 = new DetectedLanguage().setName("(Unknown)").setIso6391Name("(Unknown)") .setScore(0.0); List<DetectedLanguage> detectedLanguageList1 = new ArrayList<>(Collections.singletonList(detectedLanguage1)); List<DetectedLanguage> detectedLanguageList2 = new ArrayList<>(Collections.singletonList(detectedLanguage2)); List<DetectedLanguage> detectedLanguageList3 = new ArrayList<>(Collections.singletonList(detectedLanguage3)); TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics().setCharacterCount(26).setTransactionCount(1); TextDocumentStatistics textDocumentStatistics2 = new TextDocumentStatistics().setCharacterCount(39).setTransactionCount(1); TextDocumentStatistics textDocumentStatistics3 = new TextDocumentStatistics().setCharacterCount(6).setTransactionCount(1); DetectLanguageResult detectLanguageResult1 = new DetectLanguageResult("0", textDocumentStatistics1, detectedLanguage1, detectedLanguageList1); DetectLanguageResult detectLanguageResult2 = new DetectLanguageResult("1", textDocumentStatistics2, detectedLanguage2, detectedLanguageList2); DetectLanguageResult detectLanguageResult3 = new DetectLanguageResult("2", textDocumentStatistics3, detectedLanguage3, detectedLanguageList3); TextBatchStatistics textBatchStatistics = new TextBatchStatistics().setDocumentCount(3).setErroneousDocumentCount(0).setTransactionCount(3).setValidDocumentCount(3); List<DetectLanguageResult> detectLanguageResultList = new ArrayList<>(Arrays.asList(detectLanguageResult1, detectLanguageResult2, detectLanguageResult3)); return new DocumentResultCollection<>(detectLanguageResultList, "2019-10-01", textBatchStatistics); 
}
List<DetectedLanguage> detectedLanguageList1 = new ArrayList<>(Collections.singletonList(detectedLanguage1));
static DocumentResultCollection<DetectLanguageResult> getExpectedBatchDetectedLanguages() { DetectedLanguage detectedLanguage1 = new DetectedLanguage("English", "en", 1.0); DetectedLanguage detectedLanguage2 = new DetectedLanguage("Spanish", "es", 1.0); DetectedLanguage detectedLanguage3 = new DetectedLanguage("(Unknown)", "(Unknown)", 0.0); List<DetectedLanguage> detectedLanguageList1 = Collections.singletonList(detectedLanguage1); List<DetectedLanguage> detectedLanguageList2 = Collections.singletonList(detectedLanguage2); List<DetectedLanguage> detectedLanguageList3 = Collections.singletonList(detectedLanguage3); TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics(26, 1); TextDocumentStatistics textDocumentStatistics2 = new TextDocumentStatistics(39, 1); TextDocumentStatistics textDocumentStatistics3 = new TextDocumentStatistics(6, 1); DetectLanguageResult detectLanguageResult1 = new DetectLanguageResult("0", textDocumentStatistics1, null, detectedLanguage1, detectedLanguageList1); DetectLanguageResult detectLanguageResult2 = new DetectLanguageResult("1", textDocumentStatistics2, null, detectedLanguage2, detectedLanguageList2); DetectLanguageResult detectLanguageResult3 = new DetectLanguageResult("2", textDocumentStatistics3, null, detectedLanguage3, detectedLanguageList3); TextDocumentBatchStatistics textDocumentBatchStatistics = new TextDocumentBatchStatistics(3, 0, 3, 3); List<DetectLanguageResult> detectLanguageResultList = Arrays.asList(detectLanguageResult1, detectLanguageResult2, detectLanguageResult3); return new DocumentResultCollection<>(detectLanguageResultList, MODEL_VERSION, textDocumentBatchStatistics); }
class TextAnalyticsClientTestBase extends TestBase { private static final String TEXT_ANALYTICS_PROPERTIES = "azure-textanalytics.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; private static final String DEFAULT_SCOPE = "https: final Map<String, String> properties = CoreUtils.getProperties(TEXT_ANALYTICS_PROPERTIES); private final String clientName = properties.getOrDefault(NAME, "UnknownName"); private final String clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); private boolean showStatistics = false; private HttpLogOptions httpLogOptions = new HttpLogOptions(); <T> T clientSetup(Function<HttpPipeline, T> clientBuilder) { TokenCredential credential = null; if (!interceptorManager.isPlaybackMode()) { credential = new DefaultAzureCredentialBuilder().build(); } HttpClient httpClient; Configuration buildConfiguration = Configuration.getGlobalConfiguration().clone(); final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); HttpPolicyProviders.addBeforeRetryPolicies(policies); if (credential != null) { policies.add(new BearerTokenAuthenticationPolicy(credential, DEFAULT_SCOPE)); } policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))); if (interceptorManager.isPlaybackMode()) { httpClient = interceptorManager.getPlaybackClient(); } else { httpClient = new NettyAsyncHttpClientBuilder().wiretap(true).build(); } policies.add(interceptorManager.getRecordPolicy()); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); T client; client = clientBuilder.apply(pipeline); return 
Objects.requireNonNull(client); } @Test public abstract void detectSingleTextLanguage(); @Test public abstract void detectLanguageEmptyText(); @Test public abstract void detectLanguageFaultyText(); @Test public abstract void detectLanguagesBatchInput(); @Test public abstract void detectLanguagesBatchInputShowStatistics(); @Test public abstract void detectLanguagesBatchStringInput(); @Test public abstract void detectLanguagesBatchListCountryHint(); void detectLanguageShowStatisticsRunner(BiConsumer<List<DetectLanguageInput>, TextAnalyticsRequestOptions> testRunner) { final List<DetectLanguageInput> detectLanguageInputs = Arrays.asList( new DetectLanguageInput("0", "This is written in English", "US"), new DetectLanguageInput("1", "Este es un document escrito en Español."), new DetectLanguageInput("2", "~@!~:)", "US") ); testRunner.accept(detectLanguageInputs, setTextAnalyticsRequestOptions()); } void detectLanguageDuplicateIdRunner(BiConsumer<List<DetectLanguageInput>, TextAnalyticsRequestOptions> testRunner) { final List<DetectLanguageInput> detectLanguageInputs = Arrays.asList( new DetectLanguageInput("0", "This is written in English", "US"), new DetectLanguageInput("0", "Este es un document escrito en Español.") ); testRunner.accept(detectLanguageInputs, setTextAnalyticsRequestOptions()); } static void detectLanguagesCountryHintRunner(BiConsumer<List<String>, String> testRunner) { final List<String> inputs = new ArrayList<>(Arrays.asList( "This is written in English", "Este es un document escrito en Español.", "~@!~:)")); testRunner.accept(inputs, "US"); } static void detectLanguageStringInputRunner(Consumer<List<String>> testRunner) { final List<String> inputs = new ArrayList<>(Arrays.asList( "This is written in English", "Este es un document escrito en Español.", "~@!~:)")); testRunner.accept(inputs); } static void detectLanguageRunner(Consumer<List<DetectLanguageInput>> testRunner) { final List<DetectLanguageInput> detectLanguageInputs = Arrays.asList( new 
DetectLanguageInput("0", "This is written in English", "US"), new DetectLanguageInput("1", "Este es un document escrito en Español."), new DetectLanguageInput("2", "~@!~:)", "US") ); testRunner.accept(detectLanguageInputs); } @Test public abstract void recognizeEntitiesForSimpleInput(); @Test public abstract void recognizeEntitiesForEmptyText(); @Test public abstract void recognizeEntitiesForFaultyText(); @Test public abstract void recognizeEntitiesForBatchInput(); @Test public abstract void recognizeEntitiesForBatchInputShowStatistics(); void recognizeEntitiesShowStatisticsRunner(BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) { final List<TextDocumentInput> detectLanguageInputs = Arrays.asList( new TextDocumentInput("1", "Satya Nadella is the CEO of Microsoft", "en"), new TextDocumentInput("2", "Elon Musk is the CEO of SpaceX and Tesla.", "en"), new TextDocumentInput("2", "~@!~:)", "en") ); testRunner.accept(detectLanguageInputs, setTextAnalyticsRequestOptions()); } @Test public abstract void recognizeEntitiesForBatchStringInput(); @Test public abstract void recognizeEntitiesForBatchListCountryHint(); static DocumentResultCollection<NamedEntityResult> getExpectedBatchNamedEntityResult() { NamedEntity namedEntity1 = new NamedEntity() .setType("English").setText("Satya Nadella is the CEO of Microsoft").setSubtype("").setLength(1).setOffset(1).setScore(1.0); NamedEntity namedEntity2 = new NamedEntity() .setType("English").setText("").setSubtype("Elon Musk is the CEO of SpaceX and Tesla.").setLength(1).setOffset(1).setScore(1.0); NamedEntity namedEntity3 = new NamedEntity() .setType("English").setText("").setSubtype("").setLength(1).setOffset(1).setScore(1.0); List<NamedEntity> namedEntityList1 = new ArrayList<>(Collections.singletonList(namedEntity1)); List<NamedEntity> namedEntityList2 = new ArrayList<>(Collections.singletonList(namedEntity2)); List<NamedEntity> namedEntityList3 = new 
ArrayList<>(Collections.singletonList(namedEntity3)); TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics().setCharacterCount(26).setTransactionCount(1); TextDocumentStatistics textDocumentStatistics2 = new TextDocumentStatistics().setCharacterCount(39).setTransactionCount(1); TextDocumentStatistics textDocumentStatistics3 = new TextDocumentStatistics().setCharacterCount(6).setTransactionCount(1); NamedEntityResult namedEntityResult1 = new NamedEntityResult("0", textDocumentStatistics1, namedEntityList1); NamedEntityResult namedEntityResult2 = new NamedEntityResult("1", textDocumentStatistics2, namedEntityList2); NamedEntityResult namedEntityResult3 = new NamedEntityResult("2", textDocumentStatistics3, namedEntityList3); TextBatchStatistics textBatchStatistics = new TextBatchStatistics().setDocumentCount(3) .setErroneousDocumentCount(0).setTransactionCount(3).setValidDocumentCount(3); List<NamedEntityResult> detectLanguageResultList = new ArrayList<>( Arrays.asList(namedEntityResult1, namedEntityResult2, namedEntityResult3)); return new DocumentResultCollection<>(detectLanguageResultList, "2019-10-01", textBatchStatistics); } private TextAnalyticsRequestOptions setTextAnalyticsRequestOptions() { this.showStatistics = true; return new TextAnalyticsRequestOptions().setShowStatistics(true); } String getEndPoint() { return interceptorManager.isPlaybackMode() ? "http: : Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_ENDPOINT"); } /** * Helper method to verify batch result. * * @param actualResult DocumentResultCollection<> returned by the API. * @param testApi the API to test. 
*/ <T> void validateBatchResult(DocumentResultCollection<T> actualResult, DocumentResultCollection<T> expectedResult, String testApi) { assertEquals(expectedResult.getModelVersion(), actualResult.getModelVersion()); if (this.showStatistics) { validateBatchStatistics(expectedResult.getStatistics(), actualResult.getStatistics()); } validateDocuments(expectedResult, actualResult, testApi); } /** * Helper method to verify documents returned in a batch request. * * @param expectedResult the expected result collection.. * @param actualResult the actual result collection returned by the API. * @param testApi the API to test. */ private <T> void validateDocuments(DocumentResultCollection<T> expectedResult, DocumentResultCollection<T> actualResult, String testApi) { switch (testApi) { case "Language": final List<DetectLanguageResult> expectedResultList = expectedResult.stream() .filter(element -> element instanceof DetectLanguageResult) .map(element -> (DetectLanguageResult) element) .collect(Collectors.toList()); final List<DetectLanguageResult> actualResultList = actualResult.stream() .filter(element -> element instanceof DetectLanguageResult) .map(element -> (DetectLanguageResult) element) .collect(Collectors.toList()); assertEquals(expectedResultList.size(), actualResultList.size()); actualResultList.forEach(actualItem -> { Optional<DetectLanguageResult> optionalExpectedItem = expectedResultList.stream().filter( expectedEachItem -> actualItem.getId().equals(expectedEachItem.getId())).findFirst(); assertTrue(optionalExpectedItem.isPresent()); DetectLanguageResult expectedItem = optionalExpectedItem.get(); if (actualItem.getError() == null && this.showStatistics) { validatePrimaryLanguage(expectedItem.getPrimaryLanguage(), actualItem.getPrimaryLanguage()); validateDocumentStatistics(expectedItem.getStatistics(), actualItem.getStatistics()); validateDetectedLanguages(expectedItem.getDetectedLanguages(), actualItem.getDetectedLanguages()); } }); break; default: break; } } 
/** * Helper method to verify TextBatchStatistics. * * @param expectedStatistics * @param actualStatistics */ private static void validateBatchStatistics(TextBatchStatistics expectedStatistics, TextBatchStatistics actualStatistics) { assertEquals(expectedStatistics.getDocumentCount(), actualStatistics.getDocumentCount()); assertEquals(expectedStatistics.getErroneousDocumentCount(), actualStatistics.getErroneousDocumentCount()); assertEquals(expectedStatistics.getValidDocumentCount(), actualStatistics.getValidDocumentCount()); assertEquals(expectedStatistics.getTransactionCount(), actualStatistics.getTransactionCount()); } /** * Helper method to verify the error document. * * @param expectedError the Error returned from the service. * @param actualError the Error returned from the API. */ static void validateErrorDocument(Error expectedError, Error actualError) { assertEquals(expectedError.getCode(), actualError.getCode()); assertEquals(expectedError.getMessage(), actualError.getMessage()); assertEquals(expectedError.getTarget(), actualError.getTarget()); assertEquals(expectedError.getInnererror(), actualError.getInnererror()); } /** * Helper method to verify TextDocumentStatistics. * * @param expected the expected value for TextDocumentStatistics. * @param actual the value returned by API. */ private static void validateDocumentStatistics(TextDocumentStatistics expected, TextDocumentStatistics actual) { assertEquals(expected.getCharacterCount(), actual.getCharacterCount()); assertEquals(expected.getTransactionCount(), actual.getTransactionCount()); } /** * Helper method to validate a single detected language. * * @param expectedLanguage detectedLanguage returned by the service. * @param actualLanguage detectedLanguage returned by the API. 
*/ static void validatePrimaryLanguage(DetectedLanguage expectedLanguage, DetectedLanguage actualLanguage) { assertEquals(expectedLanguage.getIso6391Name(), actualLanguage.getIso6391Name()); assertEquals(expectedLanguage.getName(), actualLanguage.getName()); assertEquals(expectedLanguage.getScore(), actualLanguage.getScore()); } /** * Helper method to validate the list of detected languages. * * @param expectedLanguageList detectedLanguages returned by the service. * @param actualLanguageList detectedLanguages returned by the API. */ static void validateDetectedLanguages(List<DetectedLanguage> expectedLanguageList, List<DetectedLanguage> actualLanguageList) { for (int i = 0; i < expectedLanguageList.size(); i++) { DetectedLanguage expectedDetectedLanguage = expectedLanguageList.get(i); DetectedLanguage actualDetectedLanguage = actualLanguageList.get(i); validatePrimaryLanguage(expectedDetectedLanguage, actualDetectedLanguage); } } /** * Helper method to verify that a command throws an IllegalArgumentException. * * @param exceptionThrower Command that should throw the exception */ static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) { try { exceptionThrower.run(); fail(); } catch (Exception ex) { assertEquals(exception, ex.getClass()); } } static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) { assertEquals(expectedExceptionType, exception.getClass()); assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode()); } static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) { try { exceptionThrower.run(); fail(); } catch (Throwable ex) { assertRestException(ex, expectedExceptionType, expectedStatusCode); } } }
class TextAnalyticsClientTestBase extends TestBase { private static final String TEXT_ANALYTICS_PROPERTIES = "azure-ai-textanalytics.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; private static final String DEFAULT_SCOPE = "https: private final Map<String, String> properties = CoreUtils.getProperties(TEXT_ANALYTICS_PROPERTIES); private final String clientName = properties.getOrDefault(NAME, "UnknownName"); private final String clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); private boolean showStatistics = false; private HttpLogOptions httpLogOptions = new HttpLogOptions(); private static final String MODEL_VERSION = "2019-10-01"; enum TestEndpoint { LANGUAGE, NAMED_ENTITY, LINKED_ENTITY, KEY_PHRASES, SENTIMENT } <T> T clientSetup(Function<HttpPipeline, T> clientBuilder) { TokenCredential credential = null; if (!interceptorManager.isPlaybackMode()) { credential = new DefaultAzureCredentialBuilder().build(); } HttpClient httpClient; Configuration buildConfiguration = Configuration.getGlobalConfiguration().clone(); final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); HttpPolicyProviders.addBeforeRetryPolicies(policies); if (credential != null) { policies.add(new BearerTokenAuthenticationPolicy(credential, DEFAULT_SCOPE)); } policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))); if (interceptorManager.isPlaybackMode()) { httpClient = interceptorManager.getPlaybackClient(); } else { httpClient = new NettyAsyncHttpClientBuilder().wiretap(true).build(); } policies.add(interceptorManager.getRecordPolicy()); HttpPipeline pipeline = new HttpPipelineBuilder() 
.policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); T client; client = clientBuilder.apply(pipeline); return Objects.requireNonNull(client); } @Test abstract void detectSingleTextLanguage(); @Test abstract void detectLanguageEmptyText(); @Test abstract void detectLanguageFaultyText(); @Test abstract void detectLanguagesBatchInput(); @Test abstract void detectLanguagesBatchInputShowStatistics(); @Test abstract void detectLanguagesBatchStringInput(); @Test abstract void detectLanguagesBatchListCountryHint(); @Test abstract void recognizeEntitiesForTextInput(); @Test abstract void recognizeEntitiesForEmptyText(); @Test abstract void recognizeEntitiesForFaultyText(); @Test abstract void recognizeEntitiesForBatchInput(); @Test abstract void recognizeEntitiesForBatchInputShowStatistics(); @Test abstract void recognizeEntitiesForBatchStringInput(); @Test abstract void recognizeEntitiesForListLanguageHint(); @Test abstract void recognizePiiEntitiesForTextInput(); @Test abstract void recognizePiiEntitiesForEmptyText(); @Test abstract void recognizePiiEntitiesForFaultyText(); @Test abstract void recognizePiiEntitiesForBatchInput(); @Test abstract void recognizePiiEntitiesForBatchInputShowStatistics(); @Test abstract void recognizePiiEntitiesForBatchStringInput(); @Test abstract void recognizePiiEntitiesForListLanguageHint(); @Test abstract void recognizeLinkedEntitiesForTextInput(); @Test abstract void recognizeLinkedEntitiesForEmptyText(); @Test abstract void recognizeLinkedEntitiesForFaultyText(); @Test abstract void recognizeLinkedEntitiesForBatchInput(); @Test abstract void recognizeLinkedEntitiesForBatchInputShowStatistics(); @Test abstract void recognizeLinkedEntitiesForBatchStringInput(); @Test abstract void recognizeLinkedEntitiesForListLanguageHint(); @Test abstract void extractKeyPhrasesForTextInput(); @Test abstract void extractKeyPhrasesForEmptyText(); @Test abstract void extractKeyPhrasesForFaultyText(); @Test abstract void 
extractKeyPhrasesForBatchInput(); @Test abstract void extractKeyPhrasesForBatchInputShowStatistics(); @Test abstract void extractKeyPhrasesForBatchStringInput(); @Test abstract void extractKeyPhrasesForListLanguageHint(); @Test abstract void analyseSentimentForTextInput(); @Test abstract void analyseSentimentForEmptyText(); @Test abstract void analyseSentimentForFaultyText(); @Test abstract void analyseSentimentForBatchInput(); @Test abstract void analyseSentimentForBatchInputShowStatistics(); @Test abstract void analyseSentimentForBatchStringInput(); @Test abstract void analyseSentimentForListLanguageHint(); void detectLanguageShowStatisticsRunner(BiConsumer<List<DetectLanguageInput>, TextAnalyticsRequestOptions> testRunner) { final List<DetectLanguageInput> detectLanguageInputs = Arrays.asList( new DetectLanguageInput("0", "This is written in English", "US"), new DetectLanguageInput("1", "Este es un document escrito en Español."), new DetectLanguageInput("2", "~@!~:)", "US") ); TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setShowStatistics(true); showStatistics = true; testRunner.accept(detectLanguageInputs, options); } void detectLanguageDuplicateIdRunner(BiConsumer<List<DetectLanguageInput>, TextAnalyticsRequestOptions> testRunner) { final List<DetectLanguageInput> detectLanguageInputs = Arrays.asList( new DetectLanguageInput("0", "This is written in English", "US"), new DetectLanguageInput("0", "Este es un document escrito en Español.") ); testRunner.accept(detectLanguageInputs, null); } static void detectLanguagesCountryHintRunner(BiConsumer<List<String>, String> testRunner) { final List<String> inputs = Arrays.asList( "This is written in English", "Este es un document escrito en Español.", "~@!~:)"); testRunner.accept(inputs, "en"); } static void detectLanguageStringInputRunner(Consumer<List<String>> testRunner) { final List<String> inputs = Arrays.asList( "This is written in English", "Este es un document escrito en Español.", 
"~@!~:)"); testRunner.accept(inputs); } static void detectLanguageRunner(Consumer<List<DetectLanguageInput>> testRunner) { final List<DetectLanguageInput> detectLanguageInputs = Arrays.asList( new DetectLanguageInput("0", "This is written in English", "US"), new DetectLanguageInput("1", "Este es un document escrito en Español."), new DetectLanguageInput("2", "~@!~:)", "US") ); testRunner.accept(detectLanguageInputs); } static void recognizeNamedEntityStringInputRunner(Consumer<List<String>> testRunner) { final List<String> inputs = Arrays.asList( "I had a wonderful trip to Seattle last week.", "I work at Microsoft."); testRunner.accept(inputs); } static void recognizeNamedEntitiesLanguageHintRunner(BiConsumer<List<String>, String> testRunner) { final List<String> inputs = Arrays.asList( "I had a wonderful trip to Seattle last week.", "I work at Microsoft."); testRunner.accept(inputs, "en"); } static void recognizeBatchNamedEntityRunner(Consumer<List<TextDocumentInput>> testRunner) { final List<TextDocumentInput> textDocumentInputs = Arrays.asList( new TextDocumentInput("0", "I had a wonderful trip to Seattle last week."), new TextDocumentInput("1", "I work at Microsoft.")); testRunner.accept(textDocumentInputs); } void recognizeBatchNamedEntitiesShowStatsRunner( BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) { final List<TextDocumentInput> textDocumentInputs = Arrays.asList( new TextDocumentInput("0", "I had a wonderful trip to Seattle last week."), new TextDocumentInput("1", "I work at Microsoft.")); TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setShowStatistics(true); showStatistics = true; testRunner.accept(textDocumentInputs, options); } static void recognizePiiLanguageHintRunner(BiConsumer<List<String>, String> testRunner) { final List<String> inputs = Arrays.asList( "Microsoft employee with ssn 859-98-0987 is using our awesome API's.", "Your ABA number - 111000025 - is the first 9 digits in the lower 
left hand corner of your personal check."); testRunner.accept(inputs, "en"); } static void recognizePiiStringInputRunner(Consumer<List<String>> testRunner) { final List<String> inputs = Arrays.asList( "Microsoft employee with ssn 859-98-0987 is using our awesome API's.", "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check."); testRunner.accept(inputs); } static void recognizeBatchPiiRunner(Consumer<List<TextDocumentInput>> testRunner) { final List<TextDocumentInput> textDocumentInputs = Arrays.asList( new TextDocumentInput("0", "Microsoft employee with ssn 859-98-0987 is using our awesome API's."), new TextDocumentInput("1", "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.")); testRunner.accept(textDocumentInputs); } void recognizeBatchPiiEntitiesShowStatsRunner( BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) { final List<TextDocumentInput> textDocumentInputs = Arrays.asList( new TextDocumentInput("0", "Microsoft employee with ssn 859-98-0987 is using our awesome API's."), new TextDocumentInput("1", "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.")); TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setShowStatistics(true); showStatistics = true; testRunner.accept(textDocumentInputs, options); } void recognizeBatchLinkedEntitiesShowStatsRunner( BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) { final List<TextDocumentInput> textDocumentInputs = Arrays.asList( new TextDocumentInput("0", "I had a wonderful trip to Seattle last week."), new TextDocumentInput("1", "I work at Microsoft.")); TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setShowStatistics(true); showStatistics = true; testRunner.accept(textDocumentInputs, options); } static void 
recognizeLinkedLanguageHintRunner(BiConsumer<List<String>, String> testRunner) { final List<String> inputs = Arrays.asList( "I had a wonderful trip to Seattle last week.", "I work at Microsoft."); testRunner.accept(inputs, "en"); } static void recognizeLinkedStringInputRunner(Consumer<List<String>> testRunner) { final List<String> inputs = Arrays.asList( "I had a wonderful trip to Seattle last week.", "I work at Microsoft."); testRunner.accept(inputs); } static void recognizeBatchLinkedEntityRunner(Consumer<List<TextDocumentInput>> testRunner) { final List<TextDocumentInput> textDocumentInputs = Arrays.asList( new TextDocumentInput("0", "I had a wonderful trip to Seattle last week."), new TextDocumentInput("1", "I work at Microsoft.")); testRunner.accept(textDocumentInputs); } void extractBatchKeyPhrasesShowStatsRunner( BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) { final List<TextDocumentInput> textDocumentInputs = Arrays.asList( new TextDocumentInput("0", "Hello world. This is some input text that I love."), new TextDocumentInput("1", "Bonjour tout le monde", "fr")); TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setShowStatistics(true); showStatistics = true; testRunner.accept(textDocumentInputs, options); } static void extractKeyPhrasesLanguageHintRunner(BiConsumer<List<String>, String> testRunner) { final List<String> inputs = Arrays.asList( "Hello world. This is some input text that I love.", "Bonjour tout le monde"); testRunner.accept(inputs, "en"); } static void extractKeyPhrasesStringInputRunner(Consumer<List<String>> testRunner) { final List<String> inputs = Arrays.asList( "Hello world. This is some input text that I love.", "Bonjour tout le monde"); testRunner.accept(inputs); } static void extractBatchKeyPhrasesRunner(Consumer<List<TextDocumentInput>> testRunner) { final List<TextDocumentInput> textDocumentInputs = Arrays.asList( new TextDocumentInput("0", "Hello world. 
This is some input text that I love."), new TextDocumentInput("1", "Bonjour tout le monde")); testRunner.accept(textDocumentInputs); } static void analyseSentimentLanguageHintRunner(BiConsumer<List<String>, String> testRunner) { testRunner.accept(getSentimentInput(), "en"); } static void analyseSentimentStringInputRunner(Consumer<List<String>> testRunner) { testRunner.accept(getSentimentInput()); } static void analyseBatchSentimentRunner(Consumer<List<TextDocumentInput>> testRunner) { final List<String> sentimentInputs = getSentimentInput(); testRunner.accept(Arrays.asList( new TextDocumentInput("0", sentimentInputs.get(0)), new TextDocumentInput("1", sentimentInputs.get(1)) )); } void analyseBatchSentimentShowStatsRunner( BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) { final List<TextDocumentInput> textDocumentInputs = Arrays.asList( new TextDocumentInput("0", "The hotel was dark and unclean. The restaurant had amazing gnocchi."), new TextDocumentInput("1", "The restaurant had amazing gnocchi. The hotel was dark and unclean.") ); TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setShowStatistics(true); testRunner.accept(textDocumentInputs, options); } static List<String> getSentimentInput() { return Arrays.asList("The hotel was dark and unclean. The restaurant had amazing gnocchi.", "The restaurant had amazing gnocchi. The hotel was dark and unclean."); } String getEndpoint() { return interceptorManager.isPlaybackMode() ? "http: : Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_ENDPOINT"); } /** * Helper method to verify batch result. * * @param actualResult DocumentResultCollection<> returned by the API. * @param testApi the API to test. 
*/ <T> void validateBatchResult(DocumentResultCollection<T> actualResult, DocumentResultCollection<T> expectedResult, TestEndpoint testApi) { assertEquals(expectedResult.getModelVersion(), actualResult.getModelVersion()); if (this.showStatistics) { validateBatchStatistics(expectedResult.getStatistics(), actualResult.getStatistics()); } validateDocuments(expectedResult, actualResult, testApi); } /** * Helper method to verify documents returned in a batch request. * * @param expectedResult the expected result collection.. * @param actualResult the actual result collection returned by the API. * @param testApi the API to test. */ private <T> void validateDocuments(DocumentResultCollection<T> expectedResult, DocumentResultCollection<T> actualResult, TestEndpoint testApi) { switch (testApi) { case LANGUAGE: final List<DetectLanguageResult> detectLanguageResults = expectedResult.stream() .filter(element -> element instanceof DetectLanguageResult) .map(element -> (DetectLanguageResult) element) .collect(Collectors.toList()); final List<DetectLanguageResult> actualDetectLanguageResults = actualResult.stream() .filter(element -> element instanceof DetectLanguageResult) .map(element -> (DetectLanguageResult) element) .collect(Collectors.toList()); assertEquals(detectLanguageResults.size(), actualDetectLanguageResults.size()); actualDetectLanguageResults.forEach(actualItem -> { List<DetectLanguageResult> expectedItems = detectLanguageResults.stream().filter( expectedEachItem -> actualItem.getId().equals(expectedEachItem.getId())) .collect(Collectors.toList()); assertEquals(expectedItems.size(), 1); DetectLanguageResult expectedItem = expectedItems.get(0); if (actualItem.getError() == null) { if (this.showStatistics) { validateDocumentStatistics(expectedItem.getStatistics(), actualItem.getStatistics()); } validatePrimaryLanguage(expectedItem.getPrimaryLanguage(), actualItem.getPrimaryLanguage()); validateDetectedLanguages(expectedItem.getDetectedLanguages(), 
actualItem.getDetectedLanguages()); } }); break; case NAMED_ENTITY: final List<RecognizeEntitiesResult> recognizeEntitiesResults = expectedResult.stream() .filter(element -> element instanceof RecognizeEntitiesResult) .map(element -> (RecognizeEntitiesResult) element) .collect(Collectors.toList()); final List<RecognizeEntitiesResult> actualRecognizeEntitiesResults = actualResult.stream() .filter(element -> element instanceof RecognizeEntitiesResult) .map(element -> (RecognizeEntitiesResult) element) .collect(Collectors.toList()); assertEquals(recognizeEntitiesResults.size(), actualRecognizeEntitiesResults.size()); actualRecognizeEntitiesResults.forEach(actualItem -> { List<RecognizeEntitiesResult> expectedItems = recognizeEntitiesResults.stream().filter( expectedEachItem -> actualItem.getId().equals(expectedEachItem.getId())).collect( Collectors.toList() ); assertEquals(expectedItems.size(), 1); RecognizeEntitiesResult expectedItem = expectedItems.get(0); if (actualItem.getError() == null) { if (this.showStatistics) { validateDocumentStatistics(expectedItem.getStatistics(), actualItem.getStatistics()); } validateNamedEntities(expectedItem.getNamedEntities(), actualItem.getNamedEntities()); } }); break; case LINKED_ENTITY: final List<RecognizeLinkedEntitiesResult> recognizeLinkedEntitiesResults = expectedResult.stream() .filter(element -> element instanceof RecognizeLinkedEntitiesResult) .map(element -> (RecognizeLinkedEntitiesResult) element) .collect(Collectors.toList()); final List<RecognizeLinkedEntitiesResult> actualRecognizeLinkedEntitiesResults = actualResult.stream() .filter(element -> element instanceof RecognizeLinkedEntitiesResult) .map(element -> (RecognizeLinkedEntitiesResult) element) .collect(Collectors.toList()); assertEquals(recognizeLinkedEntitiesResults.size(), actualRecognizeLinkedEntitiesResults.size()); actualRecognizeLinkedEntitiesResults.forEach(actualItem -> { List<RecognizeLinkedEntitiesResult> expectedItems = 
recognizeLinkedEntitiesResults.stream().filter( expectedEachItem -> actualItem.getId().equals(expectedEachItem.getId())) .collect(Collectors.toList()); assertEquals(expectedItems.size(), 1); RecognizeLinkedEntitiesResult expectedItem = expectedItems.get(0); if (actualItem.getError() == null) { if (this.showStatistics) { validateDocumentStatistics(expectedItem.getStatistics(), actualItem.getStatistics()); } validateLinkedEntities(expectedItem.getLinkedEntities(), actualItem.getLinkedEntities()); } }); break; case KEY_PHRASES: final List<ExtractKeyPhraseResult> extractKeyPhraseResults = expectedResult.stream() .filter(element -> element instanceof ExtractKeyPhraseResult) .map(element -> (ExtractKeyPhraseResult) element) .collect(Collectors.toList()); final List<ExtractKeyPhraseResult> actualExtractKeyPhraseResults = actualResult.stream() .filter(element -> element instanceof ExtractKeyPhraseResult) .map(element -> (ExtractKeyPhraseResult) element) .collect(Collectors.toList()); assertEquals(extractKeyPhraseResults.size(), actualExtractKeyPhraseResults.size()); actualExtractKeyPhraseResults.forEach(actualItem -> { List<ExtractKeyPhraseResult> expectedItems = extractKeyPhraseResults.stream().filter( expectedEachItem -> actualItem.getId().equals(expectedEachItem.getId())) .collect(Collectors.toList()); assertEquals(expectedItems.size(), 1); ExtractKeyPhraseResult expectedItem = expectedItems.get(0); if (actualItem.getError() == null) { if (this.showStatistics) { validateDocumentStatistics(expectedItem.getStatistics(), actualItem.getStatistics()); } validateKeyPhrases(expectedItem.getKeyPhrases(), actualItem.getKeyPhrases()); } }); break; case SENTIMENT: final List<AnalyzeSentimentResult> expectedSentimentResults = expectedResult.stream() .filter(element -> element instanceof AnalyzeSentimentResult) .map(element -> (AnalyzeSentimentResult) element) .collect(Collectors.toList()); final List<AnalyzeSentimentResult> actualSentimentResults = actualResult.stream() 
.filter(element -> element instanceof AnalyzeSentimentResult) .map(element -> (AnalyzeSentimentResult) element) .collect(Collectors.toList()); expectedSentimentResults.sort(Comparator.comparing(AnalyzeSentimentResult::getId)); actualSentimentResults.sort(Comparator.comparing(AnalyzeSentimentResult::getId)); final int actualSize = actualSentimentResults.size(); final int expectedSize = expectedSentimentResults.size(); assertEquals(expectedSize, actualSize); for (int i = 0; i < actualSize; i++) { final AnalyzeSentimentResult actualSentimentResult = actualSentimentResults.get(i); final AnalyzeSentimentResult expectedSentimentResult = expectedSentimentResults.get(i); if (actualSentimentResult.getError() == null) { if (this.showStatistics) { validateDocumentStatistics(expectedSentimentResult.getStatistics(), actualSentimentResult.getStatistics()); } validateAnalysedSentiment(expectedSentimentResult.getDocumentSentiment(), actualSentimentResult.getDocumentSentiment()); validateAnalysedSentenceSentiment(expectedSentimentResult.getSentenceSentiments(), actualSentimentResult.getSentenceSentiments()); } else { validateErrorDocument(actualSentimentResult.getError(), actualSentimentResult.getError()); } } break; default: throw new IllegalArgumentException(String.format("Unsupported testApi : '%s'.", testApi)); } } /** * Helper method to verify TextBatchStatistics. * * @param expectedStatistics the expected value for TextBatchStatistics. * @param actualStatistics the value returned by API. 
*/ private static void validateBatchStatistics(TextDocumentBatchStatistics expectedStatistics, TextDocumentBatchStatistics actualStatistics) { assertEquals(expectedStatistics.getDocumentCount(), actualStatistics.getDocumentCount()); assertEquals(expectedStatistics.getErroneousDocumentCount(), actualStatistics.getErroneousDocumentCount()); assertEquals(expectedStatistics.getValidDocumentCount(), actualStatistics.getValidDocumentCount()); assertEquals(expectedStatistics.getTransactionCount(), actualStatistics.getTransactionCount()); } /** * Helper method to verify TextDocumentStatistics. * * @param expected the expected value for TextDocumentStatistics. * @param actual the value returned by API. */ private static void validateDocumentStatistics(TextDocumentStatistics expected, TextDocumentStatistics actual) { assertEquals(expected.getCharacterCount(), actual.getCharacterCount()); assertEquals(expected.getTransactionCount(), actual.getTransactionCount()); } /** * Helper method to verify LinkedEntityMatches. * * @param expectedLinkedEntityMatches the expected value for LinkedEntityMatches. * @param actualLinkedEntityMatches the value returned by API. 
*/ private static void validateLinkedEntityMatches(List<LinkedEntityMatch> expectedLinkedEntityMatches, List<LinkedEntityMatch> actualLinkedEntityMatches) { assertEquals(expectedLinkedEntityMatches.size(), actualLinkedEntityMatches.size()); expectedLinkedEntityMatches.sort(Comparator.comparing(LinkedEntityMatch::getText)); actualLinkedEntityMatches.sort(Comparator.comparing(LinkedEntityMatch::getText)); for (int i = 0; i < expectedLinkedEntityMatches.size(); i++) { LinkedEntityMatch expectedLinkedEntity = expectedLinkedEntityMatches.get(i); LinkedEntityMatch actualLinkedEntity = actualLinkedEntityMatches.get(i); assertEquals(expectedLinkedEntity.getLength(), actualLinkedEntity.getLength()); assertEquals(expectedLinkedEntity.getOffset(), actualLinkedEntity.getOffset()); assertEquals(expectedLinkedEntity.getScore(), actualLinkedEntity.getScore()); assertEquals(expectedLinkedEntity.getText(), actualLinkedEntity.getText()); } } /** * Helper method to verify the error document. * @param expectedError the Error returned from the service. * @param actualError the Error returned from the API. */ static void validateErrorDocument(TextAnalyticsError expectedError, TextAnalyticsError actualError) { assertEquals(expectedError.getCode(), actualError.getCode()); assertEquals(expectedError.getMessage(), actualError.getMessage()); assertEquals(expectedError.getTarget(), actualError.getTarget()); } /** * Helper method to validate a single detected language. * * @param expectedLanguage detectedLanguage returned by the service. * @param actualLanguage detectedLanguage returned by the API. */ static void validatePrimaryLanguage(DetectedLanguage expectedLanguage, DetectedLanguage actualLanguage) { assertEquals(expectedLanguage.getIso6391Name(), actualLanguage.getIso6391Name()); assertEquals(expectedLanguage.getName(), actualLanguage.getName()); assertEquals(expectedLanguage.getScore(), actualLanguage.getScore()); } /** * Helper method to validate the list of detected languages. 
* * @param expectedLanguageList detectedLanguages returned by the service. * @param actualLanguageList detectedLanguages returned by the API. */ static void validateDetectedLanguages(List<DetectedLanguage> expectedLanguageList, List<DetectedLanguage> actualLanguageList) { assertEquals(expectedLanguageList.size(), actualLanguageList.size()); expectedLanguageList.sort(Comparator.comparing(DetectedLanguage::getName)); actualLanguageList.sort(Comparator.comparing(DetectedLanguage::getName)); for (int i = 0; i < expectedLanguageList.size(); i++) { DetectedLanguage expectedDetectedLanguage = expectedLanguageList.get(i); DetectedLanguage actualDetectedLanguage = actualLanguageList.get(i); validatePrimaryLanguage(expectedDetectedLanguage, actualDetectedLanguage); } } /** * Helper method to validate a single named entity. * * @param expectedNamedEntity namedEntity returned by the service. * @param actualNamedEntity namedEntity returned by the API. */ static void validateNamedEntity(NamedEntity expectedNamedEntity, NamedEntity actualNamedEntity) { assertEquals(expectedNamedEntity.getLength(), actualNamedEntity.getLength()); assertEquals(expectedNamedEntity.getOffset(), actualNamedEntity.getOffset()); assertEquals(expectedNamedEntity.getScore(), actualNamedEntity.getScore()); assertEquals(expectedNamedEntity.getSubtype(), actualNamedEntity.getSubtype()); assertEquals(expectedNamedEntity.getText(), actualNamedEntity.getText()); assertEquals(expectedNamedEntity.getType(), actualNamedEntity.getType()); } /** * Helper method to validate a single named entity. * * @param expectedLinkedEntity namedEntity returned by the service. * @param actualLinkedEntity namedEntity returned by the API. 
*/ static void validateLinkedEntity(LinkedEntity expectedLinkedEntity, LinkedEntity actualLinkedEntity) { assertEquals(expectedLinkedEntity.getName(), actualLinkedEntity.getName()); assertEquals(expectedLinkedEntity.getDataSource(), actualLinkedEntity.getDataSource()); assertEquals(expectedLinkedEntity.getLanguage(), actualLinkedEntity.getLanguage()); assertEquals(expectedLinkedEntity.getUrl(), actualLinkedEntity.getUrl()); assertEquals(expectedLinkedEntity.getId(), actualLinkedEntity.getId()); validateLinkedEntityMatches(expectedLinkedEntity.getLinkedEntityMatches(), actualLinkedEntity.getLinkedEntityMatches()); } /** * Helper method to validate a single key phrase. * * @param expectedKeyPhrases key phrases returned by the service. * @param actualKeyPhrases key phrases returned by the API. */ void validateKeyPhrases(List<String> expectedKeyPhrases, List<String> actualKeyPhrases) { assertEquals(expectedKeyPhrases.size(), actualKeyPhrases.size()); Collections.sort(expectedKeyPhrases); Collections.sort(actualKeyPhrases); for (int i = 0; i < expectedKeyPhrases.size(); i++) { assertTrue(expectedKeyPhrases.get(i).equals(actualKeyPhrases.get(i))); } } /** * Helper method to validate the list of named entities. * * @param expectedNamedEntityList namedEntities returned by the service. * @param actualNamedEntityList namedEntities returned by the API. 
*/ static void validateNamedEntities(List<NamedEntity> expectedNamedEntityList, List<NamedEntity> actualNamedEntityList) { assertEquals(expectedNamedEntityList.size(), actualNamedEntityList.size()); expectedNamedEntityList.sort(Comparator.comparing(NamedEntity::getText)); actualNamedEntityList.sort(Comparator.comparing(NamedEntity::getText)); for (int i = 0; i < expectedNamedEntityList.size(); i++) { NamedEntity expectedNamedEntity = expectedNamedEntityList.get(i); NamedEntity actualNamedEntity = actualNamedEntityList.get(i); validateNamedEntity(expectedNamedEntity, actualNamedEntity); } } /** * Helper method to validate the list of named entities. * * @param expectedLinkedEntityList namedEntities returned by the service. * @param actualLinkedEntityList namedEntities returned by the API. */ static void validateLinkedEntities(List<LinkedEntity> expectedLinkedEntityList, List<LinkedEntity> actualLinkedEntityList) { assertEquals(expectedLinkedEntityList.size(), actualLinkedEntityList.size()); expectedLinkedEntityList.sort(Comparator.comparing(LinkedEntity::getName)); actualLinkedEntityList.sort(Comparator.comparing(LinkedEntity::getName)); for (int i = 0; i < expectedLinkedEntityList.size(); i++) { LinkedEntity expectedLinkedEntity = expectedLinkedEntityList.get(i); LinkedEntity actualLinkedEntity = actualLinkedEntityList.get(i); validateLinkedEntity(expectedLinkedEntity, actualLinkedEntity); } } /** * Helper method to validate the list of sentence sentiment. Can't really validate score numbers because it * frequently changed by background model computation. * * @param expectedSentimentList a list of analyzed sentence sentiment returned by the service. * @param actualSentimentList a list of analyzed sentence sentiment returned by the API. 
*/ static void validateAnalysedSentenceSentiment(List<TextSentiment> expectedSentimentList, List<TextSentiment> actualSentimentList) { assertEquals(expectedSentimentList.size(), actualSentimentList.size()); for (int i = 0; i < expectedSentimentList.size(); i++) { validateAnalysedSentiment(expectedSentimentList.get(i), actualSentimentList.get(i)); } } /** * Helper method to validate one pair of analysed sentiments. Can't really validate score numbers because it * frequently changed by background model computation. * * @param expectedSentiment analyzed document sentiment returned by the service. * @param actualSentiment analyzed document sentiment returned by the API. */ static void validateAnalysedSentiment(TextSentiment expectedSentiment, TextSentiment actualSentiment) { assertEquals(expectedSentiment.getLength(), actualSentiment.getLength()); assertEquals(expectedSentiment.getOffset(), actualSentiment.getOffset()); assertEquals(expectedSentiment.getTextSentimentClass(), actualSentiment.getTextSentimentClass()); assertEquals(expectedSentiment.getNegativeScore() > 0, actualSentiment.getNegativeScore() > 0); assertEquals(expectedSentiment.getNeutralScore() > 0, actualSentiment.getNeutralScore() > 0); assertEquals(expectedSentiment.getPositiveScore() > 0, actualSentiment.getPositiveScore() > 0); } static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) { assertEquals(expectedExceptionType, exception.getClass()); assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode()); } static void assertRestException(Runnable exceptionThrower, Class<? 
extends HttpResponseException> expectedExceptionType, int expectedStatusCode) { try { exceptionThrower.run(); fail(); } catch (Throwable ex) { assertRestException(ex, expectedExceptionType, expectedStatusCode); } } /** * Helper method to get the expected Batch Detected Languages */ static DocumentResultCollection<RecognizeEntitiesResult> getExpectedBatchNamedEntities() { NamedEntity namedEntity1 = new NamedEntity("Seattle", "Location", null, 26, 7, 0.80624294281005859); NamedEntity namedEntity2 = new NamedEntity("last week", "DateTime", "DateRange", 34, 9, 0.8); NamedEntity namedEntity3 = new NamedEntity("Microsoft", "Organization", null, 10, 9, 0.99983596801757812); List<NamedEntity> namedEntityList1 = Arrays.asList(namedEntity1, namedEntity2); List<NamedEntity> namedEntityList2 = Collections.singletonList(namedEntity3); TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics(44, 1); TextDocumentStatistics textDocumentStatistics2 = new TextDocumentStatistics(20, 1); RecognizeEntitiesResult recognizeEntitiesResult1 = new RecognizeEntitiesResult("0", textDocumentStatistics1, null, namedEntityList1); RecognizeEntitiesResult recognizeEntitiesResult2 = new RecognizeEntitiesResult("1", textDocumentStatistics2, null, namedEntityList2); TextDocumentBatchStatistics textDocumentBatchStatistics = new TextDocumentBatchStatistics(2, 0, 2, 2); List<RecognizeEntitiesResult> recognizeEntitiesResultList = Arrays.asList(recognizeEntitiesResult1, recognizeEntitiesResult2); return new DocumentResultCollection<>(recognizeEntitiesResultList, MODEL_VERSION, textDocumentBatchStatistics); } static DocumentResultCollection<RecognizePiiEntitiesResult> getExpectedBatchPiiEntities() { NamedEntity namedEntity1 = new NamedEntity("859-98-0987", "U.S. 
Social Security Number (SSN)", "", 28, 11, 0.65); NamedEntity namedEntity2 = new NamedEntity("111000025", "ABA Routing Number", "", 18, 9, 0.75); List<NamedEntity> namedEntityList1 = Collections.singletonList(namedEntity1); List<NamedEntity> namedEntityList2 = Collections.singletonList(namedEntity2); TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics(67, 1); TextDocumentStatistics textDocumentStatistics2 = new TextDocumentStatistics(105, 1); RecognizePiiEntitiesResult recognizeEntitiesResult1 = new RecognizePiiEntitiesResult("0", textDocumentStatistics1, null, namedEntityList1); RecognizePiiEntitiesResult recognizeEntitiesResult2 = new RecognizePiiEntitiesResult("1", textDocumentStatistics2, null, namedEntityList2); TextDocumentBatchStatistics textDocumentBatchStatistics = new TextDocumentBatchStatistics(2, 0, 2, 2); List<RecognizePiiEntitiesResult> recognizeEntitiesResultList = Arrays.asList(recognizeEntitiesResult1, recognizeEntitiesResult2); return new DocumentResultCollection<>(recognizeEntitiesResultList, MODEL_VERSION, textDocumentBatchStatistics); } static DocumentResultCollection<RecognizeLinkedEntitiesResult> getExpectedBatchLinkedEntities() { LinkedEntityMatch linkedEntityMatch1 = new LinkedEntityMatch("Seattle", 0.11472424095537814, 7, 26); LinkedEntityMatch linkedEntityMatch2 = new LinkedEntityMatch("Microsoft", 0.18693659716732069, 9, 10); LinkedEntity linkedEntity1 = new LinkedEntity( "Seattle", Collections.singletonList(linkedEntityMatch1), "en", "Seattle", "https: "Wikipedia"); LinkedEntity linkedEntity2 = new LinkedEntity( "Microsoft", Collections.singletonList(linkedEntityMatch2), "en", "Microsoft", "https: "Wikipedia"); List<LinkedEntity> linkedEntityList1 = Collections.singletonList(linkedEntity1); List<LinkedEntity> linkedEntityList2 = Collections.singletonList(linkedEntity2); TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics(44, 1); TextDocumentStatistics textDocumentStatistics2 = new 
TextDocumentStatistics(20, 1); RecognizeLinkedEntitiesResult recognizeLinkedEntitiesResult1 = new RecognizeLinkedEntitiesResult("0", textDocumentStatistics1, null, linkedEntityList1); RecognizeLinkedEntitiesResult recognizeLinkedEntitiesResult2 = new RecognizeLinkedEntitiesResult("1", textDocumentStatistics2, null, linkedEntityList2); TextDocumentBatchStatistics textDocumentBatchStatistics = new TextDocumentBatchStatistics(2, 0, 2, 2); List<RecognizeLinkedEntitiesResult> recognizeLinkedEntitiesResultList = Arrays.asList(recognizeLinkedEntitiesResult1, recognizeLinkedEntitiesResult2); return new DocumentResultCollection<>(recognizeLinkedEntitiesResultList, MODEL_VERSION, textDocumentBatchStatistics); } static DocumentResultCollection<ExtractKeyPhraseResult> getExpectedBatchKeyPhrases() { List<String> keyPhrasesList1 = Arrays.asList("input text", "world"); List<String> keyPhrasesList2 = Arrays.asList("monde"); TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics(49, 1); TextDocumentStatistics textDocumentStatistics2 = new TextDocumentStatistics(21, 1); ExtractKeyPhraseResult extractKeyPhraseResult1 = new ExtractKeyPhraseResult("0", textDocumentStatistics1, null, keyPhrasesList1); ExtractKeyPhraseResult extractKeyPhraseResult2 = new ExtractKeyPhraseResult("1", textDocumentStatistics2, null, keyPhrasesList2); TextDocumentBatchStatistics textDocumentBatchStatistics = new TextDocumentBatchStatistics(2, 0, 2, 2); List<ExtractKeyPhraseResult> extractKeyPhraseResultList = Arrays.asList(extractKeyPhraseResult1, extractKeyPhraseResult2); return new DocumentResultCollection<>(extractKeyPhraseResultList, MODEL_VERSION, textDocumentBatchStatistics); } static DocumentResultCollection<AnalyzeSentimentResult> getExpectedBatchTextSentiment() { final TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics(67, 1); final TextDocumentStatistics textDocumentStatistics2 = new TextDocumentStatistics(67, 1); final TextSentiment 
expectedDocumentSentiment = new TextSentiment(TextSentimentClass.MIXED, 0.1, 0.5, 0.4, 66, 0); final AnalyzeSentimentResult analyzeSentimentResult1 = new AnalyzeSentimentResult("0", textDocumentStatistics1, null, expectedDocumentSentiment, Arrays.asList( new TextSentiment(TextSentimentClass.NEGATIVE, 0.99, 0.005, 0.005, 31, 0), new TextSentiment(TextSentimentClass.POSITIVE, 0.005, 0.005, 0.99, 35, 32) )); final AnalyzeSentimentResult analyzeSentimentResult2 = new AnalyzeSentimentResult("1", textDocumentStatistics2, null, expectedDocumentSentiment, Arrays.asList( new TextSentiment(TextSentimentClass.POSITIVE, 0.005, 0.005, 0.99, 35, 0), new TextSentiment(TextSentimentClass.NEGATIVE, 0.99, 0.005, 0.005, 31, 36) )); return new DocumentResultCollection<>(Arrays.asList(analyzeSentimentResult1, analyzeSentimentResult2), MODEL_VERSION, new TextDocumentBatchStatistics(2, 0, 2, 2)); } }
Since you're doing a switch-case on this arbitrary string "Language" down in validateDocuments, you should make this either a constant string that is shared by all your test cases, or an enum.
public void detectLanguagesBatchInputShowStatistics() { detectLanguageShowStatisticsRunner((inputs, options) -> { StepVerifier.create(client.detectBatchLanguagesWithResponse(inputs, options)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchDetectedLanguages(), "Language")) .verifyComplete(); }); }
.assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchDetectedLanguages(), "Language"))
public void detectLanguagesBatchInputShowStatistics() { detectLanguageShowStatisticsRunner((inputs, options) -> { StepVerifier.create(client.detectBatchLanguagesWithResponse(inputs, options)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchDetectedLanguages(), TestEndpoint.LANGUAGE)) .verifyComplete(); }); }
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @Override protected void beforeTest() { client = clientSetup(httpPipeline -> new TextAnalyticsClientBuilder() .endpoint(getEndPoint()) .pipeline(httpPipeline) .buildAsyncClient()); } /** * Verify that we can get statistics on the collection result when given a batch input with options. */ @Test /** * Test Detect batch input languages. */ @Test public void detectLanguagesBatchInput() { detectLanguageRunner((inputs) -> { StepVerifier.create(client.detectBatchLanguages(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchDetectedLanguages(), "Language")) .verifyComplete(); }); } /** * Test Detect batch languages for List of String input with country Hint. */ @Test public void detectLanguagesBatchListCountryHint() { detectLanguagesCountryHintRunner((inputs, countryHint) -> { StepVerifier.create(client.detectLanguagesWithResponse(inputs, countryHint)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchDetectedLanguages(), "Language")) .verifyComplete(); }); } /** * Test Detect batch languages for List of String input. */ @Test public void detectLanguagesBatchStringInput() { detectLanguageStringInputRunner((inputs) -> { StepVerifier.create(client.detectLanguages(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchDetectedLanguages(), "Language")) .verifyComplete(); }); } /** * Verifies that a single DetectLanguageResult is returned for a text input to detectLanguages. 
*/ @Test public void detectSingleTextLanguage() { DetectedLanguage primaryLanguage = new DetectedLanguage().setName("English").setIso6391Name("en").setScore(1.0); List<DetectedLanguage> expectedLanguageList = new ArrayList<>(Arrays.asList(primaryLanguage)); StepVerifier.create(client.detectLanguage("This is a test English Text")) .assertNext(response -> validateDetectedLanguages(expectedLanguageList, response.getDetectedLanguages())) .verifyComplete(); } /** * Verifies that an error document is returned for a text input with invalid country hint. * <p> * TODO: update error Model. */ @Test public void detectLanguageInvalidCountryHint() { Error expectedError = new Error().setCode("InvalidArgument").setMessage("Invalid Country Hint."); StepVerifier.create(client.detectLanguageWithResponse("Este es un document escrito en Español.", "en")) .assertNext(response -> validateErrorDocument(expectedError, response.getValue().getError())) .verifyComplete(); } /** * Verifies that an error document is returned for a empty text input. */ @Test public void detectLanguageEmptyText() { Error expectedError = new Error().setCode("InvalidArgument").setMessage("Invalid document in request."); StepVerifier.create(client.detectLanguage("")) .assertNext(response -> validateErrorDocument(expectedError, response.getError())) .verifyComplete(); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. */ @Test public void detectLanguageFaultyText() { StepVerifier.create(client.detectLanguage("!@ .assertNext(response -> assertEquals(response.getPrimaryLanguage().getIso6391Name(), "(Unknown)")) .verifyComplete(); } /** * Verifies that a Bad request exception is returned for input documents with same ids. 
*/ @Test public void detectLanguageDuplicateIdInput() { detectLanguageDuplicateIdRunner((inputs, options) -> { StepVerifier.create(client.detectBatchLanguagesWithResponse(inputs, options, Context.NONE)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 400)); }); } @Test public void recognizeEntitiesForSimpleInput() { DetectedLanguage primaryLanguage = new DetectedLanguage().setName("English").setIso6391Name("en").setScore(1.0); } @Test public void recognizeEntitiesForEmptyText() { } @Test public void recognizeEntitiesForFaultyText() { } @Test public void recognizeEntitiesForBatchInput() { } @Test public void recognizeEntitiesForBatchInputShowStatistics() { } @Test public void recognizeEntitiesForBatchStringInput() { } @Test public void recognizeEntitiesForBatchListCountryHint() { } }
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @Override protected void beforeTest() { client = clientSetup(httpPipeline -> new TextAnalyticsClientBuilder() .endpoint(getEndpoint()) .pipeline(httpPipeline) .buildAsyncClient()); } /** * Verify that we can get statistics on the collection result when given a batch input with options. */ @Test /** * Test Detect batch input languages. */ @Test public void detectLanguagesBatchInput() { detectLanguageRunner((inputs) -> { StepVerifier.create(client.detectBatchLanguages(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchDetectedLanguages(), TestEndpoint.LANGUAGE)) .verifyComplete(); }); } /** * Test Detect batch languages for List of String input with country Hint. */ @Test public void detectLanguagesBatchListCountryHint() { detectLanguagesCountryHintRunner((inputs, countryHint) -> { StepVerifier.create(client.detectLanguagesWithResponse(inputs, countryHint)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchDetectedLanguages(), TestEndpoint.LANGUAGE)) .verifyComplete(); }); } /** * Test Detect batch languages for List of String input. */ @Test public void detectLanguagesBatchStringInput() { detectLanguageStringInputRunner((inputs) -> { StepVerifier.create(client.detectLanguages(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchDetectedLanguages(), TestEndpoint.LANGUAGE)) .verifyComplete(); }); } /** * Verifies that a single DetectLanguageResult is returned for a text input to detectLanguages. 
*/ @Test public void detectSingleTextLanguage() { DetectedLanguage primaryLanguage = new DetectedLanguage("English", "en", 1.0); List<DetectedLanguage> expectedLanguageList = Arrays.asList(primaryLanguage); StepVerifier.create(client.detectLanguage("This is a test English Text")) .assertNext(response -> validateDetectedLanguages(expectedLanguageList, response.getDetectedLanguages())) .verifyComplete(); } /** * Verifies that an error document is returned for a text input with invalid country hint. * <p> * TODO: update error Model. */ @Test public void detectLanguageInvalidCountryHint() { TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid Country Hint.", null, null); StepVerifier.create(client.detectLanguageWithResponse("Este es un document escrito en Español.", "en")) .assertNext(response -> validateErrorDocument(expectedError, response.getValue().getError())) .verifyComplete(); } /** * Verifies that an error document is returned for a empty text input. */ @Test public void detectLanguageEmptyText() { TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null); StepVerifier.create(client.detectLanguage("")) .assertNext(response -> validateErrorDocument(expectedError, response.getError())) .verifyComplete(); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. */ @Test public void detectLanguageFaultyText() { StepVerifier.create(client.detectLanguage("!@ .assertNext(response -> assertEquals(response.getPrimaryLanguage().getIso6391Name(), "(Unknown)")) .verifyComplete(); } /** * Verifies that a Bad request exception is returned for input documents with same ids. 
*/ @Test public void detectLanguageDuplicateIdInput() { detectLanguageDuplicateIdRunner((inputs, options) -> { StepVerifier.create(client.detectBatchLanguagesWithResponse(inputs, options, Context.NONE)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 400)); }); } @Test public void recognizeEntitiesForTextInput() { NamedEntity namedEntity1 = new NamedEntity("Seattle", "Location", null, 26, 7, 0.80624294281005859); NamedEntity namedEntity2 = new NamedEntity("last week", "DateTime", "DateRange", 34, 9, 0.8); RecognizeEntitiesResult recognizeEntitiesResultList = new RecognizeEntitiesResult("0", null, null, Arrays.asList(namedEntity1, namedEntity2)); StepVerifier.create(client.recognizeEntities("I had a wonderful trip to Seattle last week.")) .assertNext(response -> validateNamedEntities(recognizeEntitiesResultList.getNamedEntities(), response.getNamedEntities())) .verifyComplete(); } @Test public void recognizeEntitiesForEmptyText() { TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null); StepVerifier.create(client.recognizeEntities("")) .assertNext(response -> validateErrorDocument(expectedError, response.getError())) .verifyComplete(); } @Override public void recognizeEntitiesForFaultyText() { StepVerifier.create(client.recognizeEntities("!@ .assertNext(response -> assertEquals(response.getNamedEntities().size(), 0)) .verifyComplete(); } @Test public void recognizeEntitiesForBatchInput() { recognizeBatchNamedEntityRunner((inputs) -> { StepVerifier.create(client.recognizeBatchEntities(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchNamedEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void recognizeEntitiesForBatchInputShowStatistics() { recognizeBatchNamedEntitiesShowStatsRunner((inputs, options) -> { StepVerifier.create(client.recognizeBatchEntitiesWithResponse(inputs, options)) 
.assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchNamedEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void recognizeEntitiesForBatchStringInput() { recognizeNamedEntityStringInputRunner((inputs) -> { StepVerifier.create(client.recognizeEntities(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchNamedEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void recognizeEntitiesForListLanguageHint() { recognizeNamedEntitiesLanguageHintRunner((inputs, language) -> { StepVerifier.create(client.recognizeEntitiesWithResponse(inputs, language)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchNamedEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void recognizeLinkedEntitiesForTextInput() { LinkedEntityMatch linkedEntityMatch1 = new LinkedEntityMatch("Seattle", 0.11472424095537814, 7, 26); LinkedEntity linkedEntity1 = new LinkedEntity("Seattle", Collections.singletonList(linkedEntityMatch1), "en", "Seattle", "https: RecognizeLinkedEntitiesResult recognizeLinkedEntitiesResultList = new RecognizeLinkedEntitiesResult("0", null, null, Collections.singletonList(linkedEntity1)); StepVerifier.create(client.recognizeLinkedEntities("I had a wonderful trip to Seattle last week.")) .assertNext(response -> validateLinkedEntities(recognizeLinkedEntitiesResultList.getLinkedEntities(), response.getLinkedEntities())) .verifyComplete(); } @Test public void recognizeLinkedEntitiesForEmptyText() { TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null); StepVerifier.create(client.recognizeLinkedEntities("")) .assertNext(response -> validateErrorDocument(expectedError, response.getError())) .verifyComplete(); } @Test public void recognizeLinkedEntitiesForFaultyText() { StepVerifier.create(client.recognizeLinkedEntities("!@ .assertNext(response 
-> assertEquals(response.getLinkedEntities().size(), 0)) .verifyComplete(); } @Test public void recognizeLinkedEntitiesForBatchInput() { recognizeBatchLinkedEntityRunner((inputs) -> { StepVerifier.create(client.recognizeBatchLinkedEntities(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchLinkedEntities(), TestEndpoint.LINKED_ENTITY)) .verifyComplete(); }); } @Test public void recognizeLinkedEntitiesForBatchInputShowStatistics() { recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) -> { StepVerifier.create(client.recognizeBatchLinkedEntitiesWithResponse(inputs, options)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchLinkedEntities(), TestEndpoint.LINKED_ENTITY)) .verifyComplete(); }); } @Test public void recognizeLinkedEntitiesForBatchStringInput() { recognizeLinkedStringInputRunner((inputs) -> { StepVerifier.create(client.recognizeLinkedEntities(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchLinkedEntities(), TestEndpoint.LINKED_ENTITY)) .verifyComplete(); }); } @Test public void recognizeLinkedEntitiesForListLanguageHint() { recognizeLinkedLanguageHintRunner((inputs, language) -> { StepVerifier.create(client.recognizeLinkedEntitiesWithResponse(inputs, language)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchLinkedEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void recognizePiiEntitiesForTextInput() { NamedEntity namedEntity1 = new NamedEntity("859-98-0987", "U.S. 
Social Security Number (SSN)", "", 28, 11, 0.65); RecognizeEntitiesResult recognizeEntitiesResultList = new RecognizeEntitiesResult("0", null, null, Collections.singletonList(namedEntity1)); StepVerifier.create(client.recognizePiiEntities("Microsoft employee with ssn 859-98-0987 is using our awesome API's.")) .assertNext(response -> validateNamedEntities(recognizeEntitiesResultList.getNamedEntities(), response.getNamedEntities())) .verifyComplete(); } @Test public void recognizePiiEntitiesForEmptyText() { TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null); StepVerifier.create(client.recognizePiiEntities("")) .assertNext(response -> validateErrorDocument(expectedError, response.getError())) .verifyComplete(); } @Override public void recognizePiiEntitiesForFaultyText() { StepVerifier.create(client.recognizePiiEntities("!@ .assertNext(response -> assertEquals(response.getNamedEntities().size(), 0)) .verifyComplete(); } @Test public void recognizePiiEntitiesForBatchInput() { recognizeBatchPiiRunner((inputs) -> { StepVerifier.create(client.recognizeBatchPiiEntities(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchPiiEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void recognizePiiEntitiesForBatchInputShowStatistics() { recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) -> { StepVerifier.create(client.recognizeBatchPiiEntitiesWithResponse(inputs, options)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchPiiEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void recognizePiiEntitiesForBatchStringInput() { recognizePiiStringInputRunner((inputs) -> { StepVerifier.create(client.recognizePiiEntities(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchPiiEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void 
recognizePiiEntitiesForListLanguageHint() { recognizePiiLanguageHintRunner((inputs, language) -> { StepVerifier.create(client.recognizePiiEntitiesWithResponse(inputs, language)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchPiiEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void extractKeyPhrasesForTextInput() { List<String> keyPhrasesList1 = Arrays.asList("monde"); StepVerifier.create(client.extractKeyPhrasesWithResponse("Bonjour tout le monde.", "fr")) .assertNext(response -> validateKeyPhrases(keyPhrasesList1, response.getValue().getKeyPhrases())) .verifyComplete(); } @Test public void extractKeyPhrasesForEmptyText() { TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null); StepVerifier.create(client.extractKeyPhrases("")) .assertNext(response -> validateErrorDocument(expectedError, response.getError())) .verifyComplete(); } @Test public void extractKeyPhrasesForFaultyText() { StepVerifier.create(client.extractKeyPhrases("!@ .assertNext(response -> assertEquals(response.getKeyPhrases().size(), 0)) .verifyComplete(); } @Test public void extractKeyPhrasesForBatchInput() { extractBatchKeyPhrasesRunner((inputs) -> { StepVerifier.create(client.extractBatchKeyPhrases(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchKeyPhrases(), TestEndpoint.KEY_PHRASES)) .verifyComplete(); }); } @Test public void extractKeyPhrasesForBatchInputShowStatistics() { extractBatchKeyPhrasesShowStatsRunner((inputs, options) -> { StepVerifier.create(client.extractBatchKeyPhrasesWithResponse(inputs, options)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchKeyPhrases(), TestEndpoint.KEY_PHRASES)) .verifyComplete(); }); } @Test public void extractKeyPhrasesForBatchStringInput() { extractKeyPhrasesStringInputRunner((inputs) -> { StepVerifier.create(client.extractKeyPhrases(inputs)) 
.assertNext(response -> validateBatchResult(response, getExpectedBatchKeyPhrases(), TestEndpoint.KEY_PHRASES)) .verifyComplete(); }); } @Test public void extractKeyPhrasesForListLanguageHint() { extractKeyPhrasesLanguageHintRunner((inputs, language) -> { StepVerifier.create(client.extractKeyPhrasesWithResponse(inputs, language)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchKeyPhrases(), TestEndpoint.KEY_PHRASES)) .verifyComplete(); }); } /** * Test analyzing sentiment for a string input. */ @Test public void analyseSentimentForTextInput() { final TextSentiment expectedDocumentSentiment = new TextSentiment(TextSentimentClass.MIXED, 0.1, 0.5, 0.4, 66, 0); final List<TextSentiment> expectedSentenceSentiments = Arrays.asList( new TextSentiment(TextSentimentClass.NEGATIVE, 0.99, 0.005, 0.005, 31, 0), new TextSentiment(TextSentimentClass.POSITIVE, 0.005, 0.005, 0.99, 35, 32)); StepVerifier .create(client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi.")) .assertNext(response -> { validateAnalysedSentiment(expectedDocumentSentiment, response.getDocumentSentiment()); validateAnalysedSentenceSentiment(expectedSentenceSentiments, response.getSentenceSentiments()); }).verifyComplete(); } /** * Verifies that an error document is returned for a empty text input. */ @Test public void analyseSentimentForEmptyText() { TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null); StepVerifier.create(client.analyzeSentiment("")) .assertNext(response -> validateErrorDocument(expectedError, response.getError())).verifyComplete(); } public void analyseSentimentForFaultyText() { } /** * Test analyzing sentiment for a list of string input. 
*/ @Test public void analyseSentimentForBatchStringInput() { analyseSentimentStringInputRunner(inputs -> StepVerifier.create(client.analyzeSentiment(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchTextSentiment(), TestEndpoint.SENTIMENT)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with language hint. */ @Test public void analyseSentimentForListLanguageHint() { analyseSentimentLanguageHintRunner((inputs, language) -> StepVerifier.create(client.analyzeSentimentWithResponse(inputs, language)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchTextSentiment(), TestEndpoint.SENTIMENT)) .verifyComplete()); } /** * Test analyzing sentiment for batch input. */ @Test public void analyseSentimentForBatchInput() { analyseBatchSentimentRunner(inputs -> StepVerifier.create(client.analyzeBatchSentiment(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchTextSentiment(), TestEndpoint.SENTIMENT)) .verifyComplete()); } /** * Verify that we can get statistics on the collection result when given a batch input with options. */ @Test public void analyseSentimentForBatchInputShowStatistics() { analyseBatchSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeBatchSentimentWithResponse(inputs, options)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchTextSentiment(), TestEndpoint.SENTIMENT)) .verifyComplete()); } }
If we write "language", the switch-case no longer passes.
public void detectLanguagesBatchInputShowStatistics() { detectLanguageShowStatisticsRunner((inputs, options) -> { StepVerifier.create(client.detectBatchLanguagesWithResponse(inputs, options)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchDetectedLanguages(), "Language")) .verifyComplete(); }); }
.assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchDetectedLanguages(), "Language"))
public void detectLanguagesBatchInputShowStatistics() { detectLanguageShowStatisticsRunner((inputs, options) -> { StepVerifier.create(client.detectBatchLanguagesWithResponse(inputs, options)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchDetectedLanguages(), TestEndpoint.LANGUAGE)) .verifyComplete(); }); }
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @Override protected void beforeTest() { client = clientSetup(httpPipeline -> new TextAnalyticsClientBuilder() .endpoint(getEndPoint()) .pipeline(httpPipeline) .buildAsyncClient()); } /** * Verify that we can get statistics on the collection result when given a batch input with options. */ @Test /** * Test Detect batch input languages. */ @Test public void detectLanguagesBatchInput() { detectLanguageRunner((inputs) -> { StepVerifier.create(client.detectBatchLanguages(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchDetectedLanguages(), "Language")) .verifyComplete(); }); } /** * Test Detect batch languages for List of String input with country Hint. */ @Test public void detectLanguagesBatchListCountryHint() { detectLanguagesCountryHintRunner((inputs, countryHint) -> { StepVerifier.create(client.detectLanguagesWithResponse(inputs, countryHint)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchDetectedLanguages(), "Language")) .verifyComplete(); }); } /** * Test Detect batch languages for List of String input. */ @Test public void detectLanguagesBatchStringInput() { detectLanguageStringInputRunner((inputs) -> { StepVerifier.create(client.detectLanguages(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchDetectedLanguages(), "Language")) .verifyComplete(); }); } /** * Verifies that a single DetectLanguageResult is returned for a text input to detectLanguages. 
*/ @Test public void detectSingleTextLanguage() { DetectedLanguage primaryLanguage = new DetectedLanguage().setName("English").setIso6391Name("en").setScore(1.0); List<DetectedLanguage> expectedLanguageList = new ArrayList<>(Arrays.asList(primaryLanguage)); StepVerifier.create(client.detectLanguage("This is a test English Text")) .assertNext(response -> validateDetectedLanguages(expectedLanguageList, response.getDetectedLanguages())) .verifyComplete(); } /** * Verifies that an error document is returned for a text input with invalid country hint. * <p> * TODO: update error Model. */ @Test public void detectLanguageInvalidCountryHint() { Error expectedError = new Error().setCode("InvalidArgument").setMessage("Invalid Country Hint."); StepVerifier.create(client.detectLanguageWithResponse("Este es un document escrito en Español.", "en")) .assertNext(response -> validateErrorDocument(expectedError, response.getValue().getError())) .verifyComplete(); } /** * Verifies that an error document is returned for a empty text input. */ @Test public void detectLanguageEmptyText() { Error expectedError = new Error().setCode("InvalidArgument").setMessage("Invalid document in request."); StepVerifier.create(client.detectLanguage("")) .assertNext(response -> validateErrorDocument(expectedError, response.getError())) .verifyComplete(); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. */ @Test public void detectLanguageFaultyText() { StepVerifier.create(client.detectLanguage("!@ .assertNext(response -> assertEquals(response.getPrimaryLanguage().getIso6391Name(), "(Unknown)")) .verifyComplete(); } /** * Verifies that a Bad request exception is returned for input documents with same ids. 
*/ @Test public void detectLanguageDuplicateIdInput() { detectLanguageDuplicateIdRunner((inputs, options) -> { StepVerifier.create(client.detectBatchLanguagesWithResponse(inputs, options, Context.NONE)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 400)); }); } @Test public void recognizeEntitiesForSimpleInput() { DetectedLanguage primaryLanguage = new DetectedLanguage().setName("English").setIso6391Name("en").setScore(1.0); } @Test public void recognizeEntitiesForEmptyText() { } @Test public void recognizeEntitiesForFaultyText() { } @Test public void recognizeEntitiesForBatchInput() { } @Test public void recognizeEntitiesForBatchInputShowStatistics() { } @Test public void recognizeEntitiesForBatchStringInput() { } @Test public void recognizeEntitiesForBatchListCountryHint() { } }
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @Override protected void beforeTest() { client = clientSetup(httpPipeline -> new TextAnalyticsClientBuilder() .endpoint(getEndpoint()) .pipeline(httpPipeline) .buildAsyncClient()); } /** * Verify that we can get statistics on the collection result when given a batch input with options. */ @Test /** * Test Detect batch input languages. */ @Test public void detectLanguagesBatchInput() { detectLanguageRunner((inputs) -> { StepVerifier.create(client.detectBatchLanguages(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchDetectedLanguages(), TestEndpoint.LANGUAGE)) .verifyComplete(); }); } /** * Test Detect batch languages for List of String input with country Hint. */ @Test public void detectLanguagesBatchListCountryHint() { detectLanguagesCountryHintRunner((inputs, countryHint) -> { StepVerifier.create(client.detectLanguagesWithResponse(inputs, countryHint)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchDetectedLanguages(), TestEndpoint.LANGUAGE)) .verifyComplete(); }); } /** * Test Detect batch languages for List of String input. */ @Test public void detectLanguagesBatchStringInput() { detectLanguageStringInputRunner((inputs) -> { StepVerifier.create(client.detectLanguages(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchDetectedLanguages(), TestEndpoint.LANGUAGE)) .verifyComplete(); }); } /** * Verifies that a single DetectLanguageResult is returned for a text input to detectLanguages. 
*/ @Test public void detectSingleTextLanguage() { DetectedLanguage primaryLanguage = new DetectedLanguage("English", "en", 1.0); List<DetectedLanguage> expectedLanguageList = Arrays.asList(primaryLanguage); StepVerifier.create(client.detectLanguage("This is a test English Text")) .assertNext(response -> validateDetectedLanguages(expectedLanguageList, response.getDetectedLanguages())) .verifyComplete(); } /** * Verifies that an error document is returned for a text input with invalid country hint. * <p> * TODO: update error Model. */ @Test public void detectLanguageInvalidCountryHint() { TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid Country Hint.", null, null); StepVerifier.create(client.detectLanguageWithResponse("Este es un document escrito en Español.", "en")) .assertNext(response -> validateErrorDocument(expectedError, response.getValue().getError())) .verifyComplete(); } /** * Verifies that an error document is returned for a empty text input. */ @Test public void detectLanguageEmptyText() { TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null); StepVerifier.create(client.detectLanguage("")) .assertNext(response -> validateErrorDocument(expectedError, response.getError())) .verifyComplete(); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. */ @Test public void detectLanguageFaultyText() { StepVerifier.create(client.detectLanguage("!@ .assertNext(response -> assertEquals(response.getPrimaryLanguage().getIso6391Name(), "(Unknown)")) .verifyComplete(); } /** * Verifies that a Bad request exception is returned for input documents with same ids. 
*/ @Test public void detectLanguageDuplicateIdInput() { detectLanguageDuplicateIdRunner((inputs, options) -> { StepVerifier.create(client.detectBatchLanguagesWithResponse(inputs, options, Context.NONE)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 400)); }); } @Test public void recognizeEntitiesForTextInput() { NamedEntity namedEntity1 = new NamedEntity("Seattle", "Location", null, 26, 7, 0.80624294281005859); NamedEntity namedEntity2 = new NamedEntity("last week", "DateTime", "DateRange", 34, 9, 0.8); RecognizeEntitiesResult recognizeEntitiesResultList = new RecognizeEntitiesResult("0", null, null, Arrays.asList(namedEntity1, namedEntity2)); StepVerifier.create(client.recognizeEntities("I had a wonderful trip to Seattle last week.")) .assertNext(response -> validateNamedEntities(recognizeEntitiesResultList.getNamedEntities(), response.getNamedEntities())) .verifyComplete(); } @Test public void recognizeEntitiesForEmptyText() { TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null); StepVerifier.create(client.recognizeEntities("")) .assertNext(response -> validateErrorDocument(expectedError, response.getError())) .verifyComplete(); } @Override public void recognizeEntitiesForFaultyText() { StepVerifier.create(client.recognizeEntities("!@ .assertNext(response -> assertEquals(response.getNamedEntities().size(), 0)) .verifyComplete(); } @Test public void recognizeEntitiesForBatchInput() { recognizeBatchNamedEntityRunner((inputs) -> { StepVerifier.create(client.recognizeBatchEntities(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchNamedEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void recognizeEntitiesForBatchInputShowStatistics() { recognizeBatchNamedEntitiesShowStatsRunner((inputs, options) -> { StepVerifier.create(client.recognizeBatchEntitiesWithResponse(inputs, options)) 
.assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchNamedEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void recognizeEntitiesForBatchStringInput() { recognizeNamedEntityStringInputRunner((inputs) -> { StepVerifier.create(client.recognizeEntities(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchNamedEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void recognizeEntitiesForListLanguageHint() { recognizeNamedEntitiesLanguageHintRunner((inputs, language) -> { StepVerifier.create(client.recognizeEntitiesWithResponse(inputs, language)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchNamedEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void recognizeLinkedEntitiesForTextInput() { LinkedEntityMatch linkedEntityMatch1 = new LinkedEntityMatch("Seattle", 0.11472424095537814, 7, 26); LinkedEntity linkedEntity1 = new LinkedEntity("Seattle", Collections.singletonList(linkedEntityMatch1), "en", "Seattle", "https: RecognizeLinkedEntitiesResult recognizeLinkedEntitiesResultList = new RecognizeLinkedEntitiesResult("0", null, null, Collections.singletonList(linkedEntity1)); StepVerifier.create(client.recognizeLinkedEntities("I had a wonderful trip to Seattle last week.")) .assertNext(response -> validateLinkedEntities(recognizeLinkedEntitiesResultList.getLinkedEntities(), response.getLinkedEntities())) .verifyComplete(); } @Test public void recognizeLinkedEntitiesForEmptyText() { TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null); StepVerifier.create(client.recognizeLinkedEntities("")) .assertNext(response -> validateErrorDocument(expectedError, response.getError())) .verifyComplete(); } @Test public void recognizeLinkedEntitiesForFaultyText() { StepVerifier.create(client.recognizeLinkedEntities("!@ .assertNext(response 
-> assertEquals(response.getLinkedEntities().size(), 0)) .verifyComplete(); } @Test public void recognizeLinkedEntitiesForBatchInput() { recognizeBatchLinkedEntityRunner((inputs) -> { StepVerifier.create(client.recognizeBatchLinkedEntities(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchLinkedEntities(), TestEndpoint.LINKED_ENTITY)) .verifyComplete(); }); } @Test public void recognizeLinkedEntitiesForBatchInputShowStatistics() { recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) -> { StepVerifier.create(client.recognizeBatchLinkedEntitiesWithResponse(inputs, options)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchLinkedEntities(), TestEndpoint.LINKED_ENTITY)) .verifyComplete(); }); } @Test public void recognizeLinkedEntitiesForBatchStringInput() { recognizeLinkedStringInputRunner((inputs) -> { StepVerifier.create(client.recognizeLinkedEntities(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchLinkedEntities(), TestEndpoint.LINKED_ENTITY)) .verifyComplete(); }); } @Test public void recognizeLinkedEntitiesForListLanguageHint() { recognizeLinkedLanguageHintRunner((inputs, language) -> { StepVerifier.create(client.recognizeLinkedEntitiesWithResponse(inputs, language)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchLinkedEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void recognizePiiEntitiesForTextInput() { NamedEntity namedEntity1 = new NamedEntity("859-98-0987", "U.S. 
Social Security Number (SSN)", "", 28, 11, 0.65); RecognizeEntitiesResult recognizeEntitiesResultList = new RecognizeEntitiesResult("0", null, null, Collections.singletonList(namedEntity1)); StepVerifier.create(client.recognizePiiEntities("Microsoft employee with ssn 859-98-0987 is using our awesome API's.")) .assertNext(response -> validateNamedEntities(recognizeEntitiesResultList.getNamedEntities(), response.getNamedEntities())) .verifyComplete(); } @Test public void recognizePiiEntitiesForEmptyText() { TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null); StepVerifier.create(client.recognizePiiEntities("")) .assertNext(response -> validateErrorDocument(expectedError, response.getError())) .verifyComplete(); } @Override public void recognizePiiEntitiesForFaultyText() { StepVerifier.create(client.recognizePiiEntities("!@ .assertNext(response -> assertEquals(response.getNamedEntities().size(), 0)) .verifyComplete(); } @Test public void recognizePiiEntitiesForBatchInput() { recognizeBatchPiiRunner((inputs) -> { StepVerifier.create(client.recognizeBatchPiiEntities(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchPiiEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void recognizePiiEntitiesForBatchInputShowStatistics() { recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) -> { StepVerifier.create(client.recognizeBatchPiiEntitiesWithResponse(inputs, options)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchPiiEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void recognizePiiEntitiesForBatchStringInput() { recognizePiiStringInputRunner((inputs) -> { StepVerifier.create(client.recognizePiiEntities(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchPiiEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void 
recognizePiiEntitiesForListLanguageHint() { recognizePiiLanguageHintRunner((inputs, language) -> { StepVerifier.create(client.recognizePiiEntitiesWithResponse(inputs, language)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchPiiEntities(), TestEndpoint.NAMED_ENTITY)) .verifyComplete(); }); } @Test public void extractKeyPhrasesForTextInput() { List<String> keyPhrasesList1 = Arrays.asList("monde"); StepVerifier.create(client.extractKeyPhrasesWithResponse("Bonjour tout le monde.", "fr")) .assertNext(response -> validateKeyPhrases(keyPhrasesList1, response.getValue().getKeyPhrases())) .verifyComplete(); } @Test public void extractKeyPhrasesForEmptyText() { TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null); StepVerifier.create(client.extractKeyPhrases("")) .assertNext(response -> validateErrorDocument(expectedError, response.getError())) .verifyComplete(); } @Test public void extractKeyPhrasesForFaultyText() { StepVerifier.create(client.extractKeyPhrases("!@ .assertNext(response -> assertEquals(response.getKeyPhrases().size(), 0)) .verifyComplete(); } @Test public void extractKeyPhrasesForBatchInput() { extractBatchKeyPhrasesRunner((inputs) -> { StepVerifier.create(client.extractBatchKeyPhrases(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchKeyPhrases(), TestEndpoint.KEY_PHRASES)) .verifyComplete(); }); } @Test public void extractKeyPhrasesForBatchInputShowStatistics() { extractBatchKeyPhrasesShowStatsRunner((inputs, options) -> { StepVerifier.create(client.extractBatchKeyPhrasesWithResponse(inputs, options)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchKeyPhrases(), TestEndpoint.KEY_PHRASES)) .verifyComplete(); }); } @Test public void extractKeyPhrasesForBatchStringInput() { extractKeyPhrasesStringInputRunner((inputs) -> { StepVerifier.create(client.extractKeyPhrases(inputs)) 
.assertNext(response -> validateBatchResult(response, getExpectedBatchKeyPhrases(), TestEndpoint.KEY_PHRASES)) .verifyComplete(); }); } @Test public void extractKeyPhrasesForListLanguageHint() { extractKeyPhrasesLanguageHintRunner((inputs, language) -> { StepVerifier.create(client.extractKeyPhrasesWithResponse(inputs, language)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchKeyPhrases(), TestEndpoint.KEY_PHRASES)) .verifyComplete(); }); } /** * Test analyzing sentiment for a string input. */ @Test public void analyseSentimentForTextInput() { final TextSentiment expectedDocumentSentiment = new TextSentiment(TextSentimentClass.MIXED, 0.1, 0.5, 0.4, 66, 0); final List<TextSentiment> expectedSentenceSentiments = Arrays.asList( new TextSentiment(TextSentimentClass.NEGATIVE, 0.99, 0.005, 0.005, 31, 0), new TextSentiment(TextSentimentClass.POSITIVE, 0.005, 0.005, 0.99, 35, 32)); StepVerifier .create(client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi.")) .assertNext(response -> { validateAnalysedSentiment(expectedDocumentSentiment, response.getDocumentSentiment()); validateAnalysedSentenceSentiment(expectedSentenceSentiments, response.getSentenceSentiments()); }).verifyComplete(); } /** * Verifies that an error document is returned for a empty text input. */ @Test public void analyseSentimentForEmptyText() { TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null); StepVerifier.create(client.analyzeSentiment("")) .assertNext(response -> validateErrorDocument(expectedError, response.getError())).verifyComplete(); } public void analyseSentimentForFaultyText() { } /** * Test analyzing sentiment for a list of string input. 
*/ @Test public void analyseSentimentForBatchStringInput() { analyseSentimentStringInputRunner(inputs -> StepVerifier.create(client.analyzeSentiment(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchTextSentiment(), TestEndpoint.SENTIMENT)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with language hint. */ @Test public void analyseSentimentForListLanguageHint() { analyseSentimentLanguageHintRunner((inputs, language) -> StepVerifier.create(client.analyzeSentimentWithResponse(inputs, language)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchTextSentiment(), TestEndpoint.SENTIMENT)) .verifyComplete()); } /** * Test analyzing sentiment for batch input. */ @Test public void analyseSentimentForBatchInput() { analyseBatchSentimentRunner(inputs -> StepVerifier.create(client.analyzeBatchSentiment(inputs)) .assertNext(response -> validateBatchResult(response, getExpectedBatchTextSentiment(), TestEndpoint.SENTIMENT)) .verifyComplete()); } /** * Verify that we can get statistics on the collection result when given a batch input with options. */ @Test public void analyseSentimentForBatchInputShowStatistics() { analyseBatchSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeBatchSentimentWithResponse(inputs, options)) .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchTextSentiment(), TestEndpoint.SENTIMENT)) .verifyComplete()); } }
Why is this wrapped in an ArrayList? Same in the one below.
static void detectLanguagesCountryHintRunner(BiConsumer<List<String>, String> testRunner) { final List<String> inputs = new ArrayList<>(Arrays.asList( "This is written in English", "Este es un document escrito en Español.", "~@!~:)")); testRunner.accept(inputs, "US"); }
final List<String> inputs = new ArrayList<>(Arrays.asList(
static void detectLanguagesCountryHintRunner(BiConsumer<List<String>, String> testRunner) { final List<String> inputs = Arrays.asList( "This is written in English", "Este es un document escrito en Español.", "~@!~:)"); testRunner.accept(inputs, "en"); }
class TextAnalyticsClientTestBase extends TestBase { private static final String TEXT_ANALYTICS_PROPERTIES = "azure-textanalytics.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; private static final String DEFAULT_SCOPE = "https: final Map<String, String> properties = CoreUtils.getProperties(TEXT_ANALYTICS_PROPERTIES); private final String clientName = properties.getOrDefault(NAME, "UnknownName"); private final String clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); private boolean showStatistics = false; private HttpLogOptions httpLogOptions = new HttpLogOptions(); <T> T clientSetup(Function<HttpPipeline, T> clientBuilder) { TokenCredential credential = null; if (!interceptorManager.isPlaybackMode()) { credential = new DefaultAzureCredentialBuilder().build(); } HttpClient httpClient; Configuration buildConfiguration = Configuration.getGlobalConfiguration().clone(); final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); HttpPolicyProviders.addBeforeRetryPolicies(policies); if (credential != null) { policies.add(new BearerTokenAuthenticationPolicy(credential, DEFAULT_SCOPE)); } policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))); if (interceptorManager.isPlaybackMode()) { httpClient = interceptorManager.getPlaybackClient(); } else { httpClient = new NettyAsyncHttpClientBuilder().wiretap(true).build(); } policies.add(interceptorManager.getRecordPolicy()); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); T client; client = clientBuilder.apply(pipeline); return 
Objects.requireNonNull(client); } @Test public abstract void detectSingleTextLanguage(); @Test public abstract void detectLanguageEmptyText(); @Test public abstract void detectLanguageFaultyText(); @Test public abstract void detectLanguagesBatchInput(); @Test public abstract void detectLanguagesBatchInputShowStatistics(); @Test public abstract void detectLanguagesBatchStringInput(); @Test public abstract void detectLanguagesBatchListCountryHint(); void detectLanguageShowStatisticsRunner(BiConsumer<List<DetectLanguageInput>, TextAnalyticsRequestOptions> testRunner) { final List<DetectLanguageInput> detectLanguageInputs = Arrays.asList( new DetectLanguageInput("0", "This is written in English", "US"), new DetectLanguageInput("1", "Este es un document escrito en Español."), new DetectLanguageInput("2", "~@!~:)", "US") ); testRunner.accept(detectLanguageInputs, setTextAnalyticsRequestOptions()); } void detectLanguageDuplicateIdRunner(BiConsumer<List<DetectLanguageInput>, TextAnalyticsRequestOptions> testRunner) { final List<DetectLanguageInput> detectLanguageInputs = Arrays.asList( new DetectLanguageInput("0", "This is written in English", "US"), new DetectLanguageInput("0", "Este es un document escrito en Español.") ); testRunner.accept(detectLanguageInputs, setTextAnalyticsRequestOptions()); } static void detectLanguageStringInputRunner(Consumer<List<String>> testRunner) { final List<String> inputs = new ArrayList<>(Arrays.asList( "This is written in English", "Este es un document escrito en Español.", "~@!~:)")); testRunner.accept(inputs); } static void detectLanguageRunner(Consumer<List<DetectLanguageInput>> testRunner) { final List<DetectLanguageInput> detectLanguageInputs = Arrays.asList( new DetectLanguageInput("0", "This is written in English", "US"), new DetectLanguageInput("1", "Este es un document escrito en Español."), new DetectLanguageInput("2", "~@!~:)", "US") ); testRunner.accept(detectLanguageInputs); } static 
DocumentResultCollection<DetectLanguageResult> getExpectedBatchDetectedLanguages() { DetectedLanguage detectedLanguage1 = new DetectedLanguage().setName("English").setIso6391Name("en") .setScore(1.0); DetectedLanguage detectedLanguage2 = new DetectedLanguage().setName("Spanish").setIso6391Name("es") .setScore(1.0); DetectedLanguage detectedLanguage3 = new DetectedLanguage().setName("(Unknown)").setIso6391Name("(Unknown)") .setScore(0.0); List<DetectedLanguage> detectedLanguageList1 = new ArrayList<>(Collections.singletonList(detectedLanguage1)); List<DetectedLanguage> detectedLanguageList2 = new ArrayList<>(Collections.singletonList(detectedLanguage2)); List<DetectedLanguage> detectedLanguageList3 = new ArrayList<>(Collections.singletonList(detectedLanguage3)); TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics().setCharacterCount(26).setTransactionCount(1); TextDocumentStatistics textDocumentStatistics2 = new TextDocumentStatistics().setCharacterCount(39).setTransactionCount(1); TextDocumentStatistics textDocumentStatistics3 = new TextDocumentStatistics().setCharacterCount(6).setTransactionCount(1); DetectLanguageResult detectLanguageResult1 = new DetectLanguageResult("0", textDocumentStatistics1, detectedLanguage1, detectedLanguageList1); DetectLanguageResult detectLanguageResult2 = new DetectLanguageResult("1", textDocumentStatistics2, detectedLanguage2, detectedLanguageList2); DetectLanguageResult detectLanguageResult3 = new DetectLanguageResult("2", textDocumentStatistics3, detectedLanguage3, detectedLanguageList3); TextBatchStatistics textBatchStatistics = new TextBatchStatistics().setDocumentCount(3).setErroneousDocumentCount(0).setTransactionCount(3).setValidDocumentCount(3); List<DetectLanguageResult> detectLanguageResultList = new ArrayList<>(Arrays.asList(detectLanguageResult1, detectLanguageResult2, detectLanguageResult3)); return new DocumentResultCollection<>(detectLanguageResultList, "2019-10-01", textBatchStatistics); } 
@Test public abstract void recognizeEntitiesForSimpleInput(); @Test public abstract void recognizeEntitiesForEmptyText(); @Test public abstract void recognizeEntitiesForFaultyText(); @Test public abstract void recognizeEntitiesForBatchInput(); @Test public abstract void recognizeEntitiesForBatchInputShowStatistics(); void recognizeEntitiesShowStatisticsRunner(BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) { final List<TextDocumentInput> detectLanguageInputs = Arrays.asList( new TextDocumentInput("1", "Satya Nadella is the CEO of Microsoft", "en"), new TextDocumentInput("2", "Elon Musk is the CEO of SpaceX and Tesla.", "en"), new TextDocumentInput("2", "~@!~:)", "en") ); testRunner.accept(detectLanguageInputs, setTextAnalyticsRequestOptions()); } @Test public abstract void recognizeEntitiesForBatchStringInput(); @Test public abstract void recognizeEntitiesForBatchListCountryHint(); static DocumentResultCollection<NamedEntityResult> getExpectedBatchNamedEntityResult() { NamedEntity namedEntity1 = new NamedEntity() .setType("English").setText("Satya Nadella is the CEO of Microsoft").setSubtype("").setLength(1).setOffset(1).setScore(1.0); NamedEntity namedEntity2 = new NamedEntity() .setType("English").setText("").setSubtype("Elon Musk is the CEO of SpaceX and Tesla.").setLength(1).setOffset(1).setScore(1.0); NamedEntity namedEntity3 = new NamedEntity() .setType("English").setText("").setSubtype("").setLength(1).setOffset(1).setScore(1.0); List<NamedEntity> namedEntityList1 = new ArrayList<>(Collections.singletonList(namedEntity1)); List<NamedEntity> namedEntityList2 = new ArrayList<>(Collections.singletonList(namedEntity2)); List<NamedEntity> namedEntityList3 = new ArrayList<>(Collections.singletonList(namedEntity3)); TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics().setCharacterCount(26).setTransactionCount(1); TextDocumentStatistics textDocumentStatistics2 = new 
TextDocumentStatistics().setCharacterCount(39).setTransactionCount(1); TextDocumentStatistics textDocumentStatistics3 = new TextDocumentStatistics().setCharacterCount(6).setTransactionCount(1); NamedEntityResult namedEntityResult1 = new NamedEntityResult("0", textDocumentStatistics1, namedEntityList1); NamedEntityResult namedEntityResult2 = new NamedEntityResult("1", textDocumentStatistics2, namedEntityList2); NamedEntityResult namedEntityResult3 = new NamedEntityResult("2", textDocumentStatistics3, namedEntityList3); TextBatchStatistics textBatchStatistics = new TextBatchStatistics().setDocumentCount(3) .setErroneousDocumentCount(0).setTransactionCount(3).setValidDocumentCount(3); List<NamedEntityResult> detectLanguageResultList = new ArrayList<>( Arrays.asList(namedEntityResult1, namedEntityResult2, namedEntityResult3)); return new DocumentResultCollection<>(detectLanguageResultList, "2019-10-01", textBatchStatistics); } private TextAnalyticsRequestOptions setTextAnalyticsRequestOptions() { this.showStatistics = true; return new TextAnalyticsRequestOptions().setShowStatistics(true); } String getEndPoint() { return interceptorManager.isPlaybackMode() ? "http: : Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_ENDPOINT"); } /** * Helper method to verify batch result. * * @param actualResult DocumentResultCollection<> returned by the API. * @param testApi the API to test. */ <T> void validateBatchResult(DocumentResultCollection<T> actualResult, DocumentResultCollection<T> expectedResult, String testApi) { assertEquals(expectedResult.getModelVersion(), actualResult.getModelVersion()); if (this.showStatistics) { validateBatchStatistics(expectedResult.getStatistics(), actualResult.getStatistics()); } validateDocuments(expectedResult, actualResult, testApi); } /** * Helper method to verify documents returned in a batch request. * * @param expectedResult the expected result collection.. 
* @param actualResult the actual result collection returned by the API. * @param testApi the API to test. */ private <T> void validateDocuments(DocumentResultCollection<T> expectedResult, DocumentResultCollection<T> actualResult, String testApi) { switch (testApi) { case "Language": final List<DetectLanguageResult> expectedResultList = expectedResult.stream() .filter(element -> element instanceof DetectLanguageResult) .map(element -> (DetectLanguageResult) element) .collect(Collectors.toList()); final List<DetectLanguageResult> actualResultList = actualResult.stream() .filter(element -> element instanceof DetectLanguageResult) .map(element -> (DetectLanguageResult) element) .collect(Collectors.toList()); assertEquals(expectedResultList.size(), actualResultList.size()); actualResultList.forEach(actualItem -> { Optional<DetectLanguageResult> optionalExpectedItem = expectedResultList.stream().filter( expectedEachItem -> actualItem.getId().equals(expectedEachItem.getId())).findFirst(); assertTrue(optionalExpectedItem.isPresent()); DetectLanguageResult expectedItem = optionalExpectedItem.get(); if (actualItem.getError() == null && this.showStatistics) { validatePrimaryLanguage(expectedItem.getPrimaryLanguage(), actualItem.getPrimaryLanguage()); validateDocumentStatistics(expectedItem.getStatistics(), actualItem.getStatistics()); validateDetectedLanguages(expectedItem.getDetectedLanguages(), actualItem.getDetectedLanguages()); } }); break; default: break; } } /** * Helper method to verify TextBatchStatistics. 
* * @param expectedStatistics * @param actualStatistics */ private static void validateBatchStatistics(TextBatchStatistics expectedStatistics, TextBatchStatistics actualStatistics) { assertEquals(expectedStatistics.getDocumentCount(), actualStatistics.getDocumentCount()); assertEquals(expectedStatistics.getErroneousDocumentCount(), actualStatistics.getErroneousDocumentCount()); assertEquals(expectedStatistics.getValidDocumentCount(), actualStatistics.getValidDocumentCount()); assertEquals(expectedStatistics.getTransactionCount(), actualStatistics.getTransactionCount()); } /** * Helper method to verify the error document. * * @param expectedError the Error returned from the service. * @param actualError the Error returned from the API. */ static void validateErrorDocument(Error expectedError, Error actualError) { assertEquals(expectedError.getCode(), actualError.getCode()); assertEquals(expectedError.getMessage(), actualError.getMessage()); assertEquals(expectedError.getTarget(), actualError.getTarget()); assertEquals(expectedError.getInnererror(), actualError.getInnererror()); } /** * Helper method to verify TextDocumentStatistics. * * @param expected the expected value for TextDocumentStatistics. * @param actual the value returned by API. */ private static void validateDocumentStatistics(TextDocumentStatistics expected, TextDocumentStatistics actual) { assertEquals(expected.getCharacterCount(), actual.getCharacterCount()); assertEquals(expected.getTransactionCount(), actual.getTransactionCount()); } /** * Helper method to validate a single detected language. * * @param expectedLanguage detectedLanguage returned by the service. * @param actualLanguage detectedLanguage returned by the API. 
*/ static void validatePrimaryLanguage(DetectedLanguage expectedLanguage, DetectedLanguage actualLanguage) { assertEquals(expectedLanguage.getIso6391Name(), actualLanguage.getIso6391Name()); assertEquals(expectedLanguage.getName(), actualLanguage.getName()); assertEquals(expectedLanguage.getScore(), actualLanguage.getScore()); } /** * Helper method to validate the list of detected languages. * * @param expectedLanguageList detectedLanguages returned by the service. * @param actualLanguageList detectedLanguages returned by the API. */ static void validateDetectedLanguages(List<DetectedLanguage> expectedLanguageList, List<DetectedLanguage> actualLanguageList) { for (int i = 0; i < expectedLanguageList.size(); i++) { DetectedLanguage expectedDetectedLanguage = expectedLanguageList.get(i); DetectedLanguage actualDetectedLanguage = actualLanguageList.get(i); validatePrimaryLanguage(expectedDetectedLanguage, actualDetectedLanguage); } } /** * Helper method to verify that a command throws an IllegalArgumentException. * * @param exceptionThrower Command that should throw the exception */ static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) { try { exceptionThrower.run(); fail(); } catch (Exception ex) { assertEquals(exception, ex.getClass()); } } static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) { assertEquals(expectedExceptionType, exception.getClass()); assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode()); } static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) { try { exceptionThrower.run(); fail(); } catch (Throwable ex) { assertRestException(ex, expectedExceptionType, expectedStatusCode); } } }
// Shared base for sync/async Text Analytics client tests: builds the HTTP pipeline (record or
// playback), declares the abstract test surface, and supplies the canned inputs each test uses.
class TextAnalyticsClientTestBase extends TestBase {
    private static final String TEXT_ANALYTICS_PROPERTIES = "azure-ai-textanalytics.properties";
    private static final String NAME = "name";
    private static final String VERSION = "version";
    // NOTE(review): this literal appears truncated in this view (everything after "https:" is
    // missing) — restore the full AAD token scope URL before compiling.
    private static final String DEFAULT_SCOPE = "https:
    private final Map<String, String> properties = CoreUtils.getProperties(TEXT_ANALYTICS_PROPERTIES);
    private final String clientName = properties.getOrDefault(NAME, "UnknownName");
    private final String clientVersion = properties.getOrDefault(VERSION, "UnknownVersion");
    // Flipped to true by the *ShowStatsRunner helpers so validateBatchResult also checks statistics.
    private boolean showStatistics = false;
    private HttpLogOptions httpLogOptions = new HttpLogOptions();
    // Service model version every expected-result fixture in this class was recorded against.
    private static final String MODEL_VERSION = "2019-10-01";

    // Which service endpoint a batch validation should apply its type-specific checks for.
    enum TestEndpoint {
        LANGUAGE, NAMED_ENTITY, LINKED_ENTITY, KEY_PHRASES, SENTIMENT
    }

    /**
     * Builds the HTTP pipeline (credential, user agent, retry, logging, record/playback client)
     * and hands it to the supplied factory to construct the client under test.
     *
     * @param clientBuilder factory that turns the assembled pipeline into a client instance.
     * @param <T> the client type produced by the factory.
     * @return the non-null client built by {@code clientBuilder}.
     */
    <T> T clientSetup(Function<HttpPipeline, T> clientBuilder) {
        TokenCredential credential = null;
        // Only live/record runs need a real credential; playback replays recorded traffic.
        if (!interceptorManager.isPlaybackMode()) {
            credential = new DefaultAzureCredentialBuilder().build();
        }
        HttpClient httpClient;
        Configuration buildConfiguration = Configuration.getGlobalConfiguration().clone();
        final List<HttpPipelinePolicy> policies = new ArrayList<>();
        policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration));
        policies.add(new RequestIdPolicy());
        policies.add(new AddDatePolicy());
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        if (credential != null) {
            policies.add(new BearerTokenAuthenticationPolicy(credential, DEFAULT_SCOPE));
        }
        policies.add(new RetryPolicy());
        HttpPolicyProviders.addAfterRetryPolicies(policies);
        policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
        if (interceptorManager.isPlaybackMode()) {
            httpClient = interceptorManager.getPlaybackClient();
        } else {
            httpClient = new NettyAsyncHttpClientBuilder().wiretap(true).build();
        }
        // NOTE(review): record policy is added unconditionally, even in playback mode — confirm
        // this is intentional (commonly it is added only when recording).
        policies.add(interceptorManager.getRecordPolicy());
        HttpPipeline pipeline = new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build();
        T client;
        client = clientBuilder.apply(pipeline);
        return Objects.requireNonNull(client);
    }

    // --- Abstract test surface: each concrete subclass (sync/async) implements these. ---
    @Test
    abstract void detectSingleTextLanguage();
    @Test
    abstract void detectLanguageEmptyText();
    @Test
    abstract void detectLanguageFaultyText();
    @Test
    abstract void detectLanguagesBatchInput();
    @Test
    abstract void detectLanguagesBatchInputShowStatistics();
    @Test
    abstract void detectLanguagesBatchStringInput();
    @Test
    abstract void detectLanguagesBatchListCountryHint();
    @Test
    abstract void recognizeEntitiesForTextInput();
    @Test
    abstract void recognizeEntitiesForEmptyText();
    @Test
    abstract void recognizeEntitiesForFaultyText();
    @Test
    abstract void recognizeEntitiesForBatchInput();
    @Test
    abstract void recognizeEntitiesForBatchInputShowStatistics();
    @Test
    abstract void recognizeEntitiesForBatchStringInput();
    @Test
    abstract void recognizeEntitiesForListLanguageHint();
    @Test
    abstract void recognizePiiEntitiesForTextInput();
    @Test
    abstract void recognizePiiEntitiesForEmptyText();
    @Test
    abstract void recognizePiiEntitiesForFaultyText();
    @Test
    abstract void recognizePiiEntitiesForBatchInput();
    @Test
    abstract void recognizePiiEntitiesForBatchInputShowStatistics();
    @Test
    abstract void recognizePiiEntitiesForBatchStringInput();
    @Test
    abstract void recognizePiiEntitiesForListLanguageHint();
    @Test
    abstract void recognizeLinkedEntitiesForTextInput();
    @Test
    abstract void recognizeLinkedEntitiesForEmptyText();
    @Test
    abstract void recognizeLinkedEntitiesForFaultyText();
    @Test
    abstract void recognizeLinkedEntitiesForBatchInput();
    @Test
    abstract void recognizeLinkedEntitiesForBatchInputShowStatistics();
    @Test
    abstract void recognizeLinkedEntitiesForBatchStringInput();
    @Test
    abstract void recognizeLinkedEntitiesForListLanguageHint();
    @Test
    abstract void extractKeyPhrasesForTextInput();
    @Test
    abstract void extractKeyPhrasesForEmptyText();
    @Test
    abstract void extractKeyPhrasesForFaultyText();
    @Test
    abstract void extractKeyPhrasesForBatchInput();
    @Test
    abstract void extractKeyPhrasesForBatchInputShowStatistics();
    @Test
    abstract void extractKeyPhrasesForBatchStringInput();
    @Test
    abstract void extractKeyPhrasesForListLanguageHint();
    @Test
    abstract void analyseSentimentForTextInput();
    @Test
    abstract void analyseSentimentForEmptyText();
    @Test
    abstract void analyseSentimentForFaultyText();
    @Test
    abstract void analyseSentimentForBatchInput();
    @Test
    abstract void analyseSentimentForBatchInputShowStatistics();
    @Test
    abstract void analyseSentimentForBatchStringInput();
    @Test
    abstract void analyseSentimentForListLanguageHint();

    // --- Test-input runners: each builds canned inputs and hands them to the supplied test body. ---

    // Language-detection batch with statistics enabled; also flips showStatistics so validation checks them.
    void detectLanguageShowStatisticsRunner(BiConsumer<List<DetectLanguageInput>, TextAnalyticsRequestOptions> testRunner) {
        final List<DetectLanguageInput> detectLanguageInputs = Arrays.asList(
            new DetectLanguageInput("0", "This is written in English", "US"),
            new DetectLanguageInput("1", "Este es un document escrito en Español."),
            new DetectLanguageInput("2", "~@!~:)", "US")
        );
        TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setShowStatistics(true);
        showStatistics = true;
        testRunner.accept(detectLanguageInputs, options);
    }

    // Batch with two documents sharing id "0" — exercises the service's duplicate-id error path.
    void detectLanguageDuplicateIdRunner(BiConsumer<List<DetectLanguageInput>, TextAnalyticsRequestOptions> testRunner) {
        final List<DetectLanguageInput> detectLanguageInputs = Arrays.asList(
            new DetectLanguageInput("0", "This is written in English", "US"),
            new DetectLanguageInput("0", "Este es un document escrito en Español.")
        );
        testRunner.accept(detectLanguageInputs, null);
    }

    // Plain-string variant of the language-detection batch input.
    static void detectLanguageStringInputRunner(Consumer<List<String>> testRunner) {
        final List<String> inputs = Arrays.asList(
            "This is written in English",
            "Este es un document escrito en Español.",
            "~@!~:)");
        testRunner.accept(inputs);
    }

    // Standard language-detection batch input without options.
    static void detectLanguageRunner(Consumer<List<DetectLanguageInput>> testRunner) {
        final List<DetectLanguageInput> detectLanguageInputs = Arrays.asList(
            new DetectLanguageInput("0", "This is written in English", "US"),
            new DetectLanguageInput("1", "Este es un document escrito en Español."),
            new DetectLanguageInput("2", "~@!~:)", "US")
        );
        testRunner.accept(detectLanguageInputs);
    }

    static void recognizeNamedEntityStringInputRunner(Consumer<List<String>> testRunner) {
        final List<String> inputs = Arrays.asList(
            "I had a wonderful trip to Seattle last week.", "I work at Microsoft.");
        testRunner.accept(inputs);
    }

    static void recognizeNamedEntitiesLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
        final List<String> inputs = Arrays.asList(
            "I had a wonderful trip to Seattle last week.", "I work at Microsoft.");
        testRunner.accept(inputs, "en");
    }

    static void recognizeBatchNamedEntityRunner(Consumer<List<TextDocumentInput>> testRunner) {
        final List<TextDocumentInput> textDocumentInputs = Arrays.asList(
            new TextDocumentInput("0", "I had a wonderful trip to Seattle last week."),
            new TextDocumentInput("1", "I work at Microsoft."));
        testRunner.accept(textDocumentInputs);
    }

    void recognizeBatchNamedEntitiesShowStatsRunner(
        BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) {
        final List<TextDocumentInput> textDocumentInputs = Arrays.asList(
            new TextDocumentInput("0", "I had a wonderful trip to Seattle last week."),
            new TextDocumentInput("1", "I work at Microsoft."));
        TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setShowStatistics(true);
        showStatistics = true;
        testRunner.accept(textDocumentInputs, options);
    }

    static void recognizePiiLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
        final List<String> inputs = Arrays.asList(
            "Microsoft employee with ssn 859-98-0987 is using our awesome API's.",
            "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.");
        testRunner.accept(inputs, "en");
    }

    static void recognizePiiStringInputRunner(Consumer<List<String>> testRunner) {
        final List<String> inputs = Arrays.asList(
            "Microsoft employee with ssn 859-98-0987 is using our awesome API's.",
            "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.");
        testRunner.accept(inputs);
    }

    static void recognizeBatchPiiRunner(Consumer<List<TextDocumentInput>> testRunner) {
        final List<TextDocumentInput> textDocumentInputs = Arrays.asList(
            new TextDocumentInput("0", "Microsoft employee with ssn 859-98-0987 is using our awesome API's."),
            new TextDocumentInput("1", "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check."));
        testRunner.accept(textDocumentInputs);
    }

    void recognizeBatchPiiEntitiesShowStatsRunner(
        BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) {
        final List<TextDocumentInput> textDocumentInputs = Arrays.asList(
            new TextDocumentInput("0", "Microsoft employee with ssn 859-98-0987 is using our awesome API's."),
            new TextDocumentInput("1", "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check."));
        TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setShowStatistics(true);
        showStatistics = true;
        testRunner.accept(textDocumentInputs, options);
    }

    void recognizeBatchLinkedEntitiesShowStatsRunner(
        BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) {
        final List<TextDocumentInput> textDocumentInputs = Arrays.asList(
            new TextDocumentInput("0", "I had a wonderful trip to Seattle last week."),
            new TextDocumentInput("1", "I work at Microsoft."));
        TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setShowStatistics(true);
        showStatistics = true;
        testRunner.accept(textDocumentInputs, options);
    }

    static void recognizeLinkedLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
        final List<String> inputs = Arrays.asList(
            "I had a wonderful trip to Seattle last week.", "I work at Microsoft.");
        testRunner.accept(inputs, "en");
    }

    static void recognizeLinkedStringInputRunner(Consumer<List<String>> testRunner) {
        final List<String> inputs = Arrays.asList(
            "I had a wonderful trip to Seattle last week.", "I work at Microsoft.");
        testRunner.accept(inputs);
    }

    static void recognizeBatchLinkedEntityRunner(Consumer<List<TextDocumentInput>> testRunner) {
        final List<TextDocumentInput> textDocumentInputs = Arrays.asList(
            new TextDocumentInput("0", "I had a wonderful trip to Seattle last week."),
            new TextDocumentInput("1", "I work at Microsoft."));
        testRunner.accept(textDocumentInputs);
    }

    void extractBatchKeyPhrasesShowStatsRunner(
        BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) {
        final List<TextDocumentInput> textDocumentInputs = Arrays.asList(
            new TextDocumentInput("0", "Hello world. This is some input text that I love."),
            new TextDocumentInput("1", "Bonjour tout le monde", "fr"));
        TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setShowStatistics(true);
        showStatistics = true;
        testRunner.accept(textDocumentInputs, options);
    }

    static void extractKeyPhrasesLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
        final List<String> inputs = Arrays.asList(
            "Hello world. This is some input text that I love.",
            "Bonjour tout le monde");
        testRunner.accept(inputs, "en");
    }

    static void extractKeyPhrasesStringInputRunner(Consumer<List<String>> testRunner) {
        final List<String> inputs = Arrays.asList(
            "Hello world. This is some input text that I love.",
            "Bonjour tout le monde");
        testRunner.accept(inputs);
    }

    static void extractBatchKeyPhrasesRunner(Consumer<List<TextDocumentInput>> testRunner) {
        final List<TextDocumentInput> textDocumentInputs = Arrays.asList(
            new TextDocumentInput("0", "Hello world. This is some input text that I love."),
            new TextDocumentInput("1", "Bonjour tout le monde"));
        testRunner.accept(textDocumentInputs);
    }

    static void analyseSentimentLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
        testRunner.accept(getSentimentInput(), "en");
    }

    static void analyseSentimentStringInputRunner(Consumer<List<String>> testRunner) {
        testRunner.accept(getSentimentInput());
    }

    static void analyseBatchSentimentRunner(Consumer<List<TextDocumentInput>> testRunner) {
        final List<String> sentimentInputs = getSentimentInput();
        testRunner.accept(Arrays.asList(
            new TextDocumentInput("0", sentimentInputs.get(0)),
            new TextDocumentInput("1", sentimentInputs.get(1))
        ));
    }

    // NOTE(review): unlike the other *ShowStatsRunner helpers this never sets
    // showStatistics = true, so validateBatchResult will skip the statistics checks even though
    // the request asks for them — confirm whether that is intentional.
    void analyseBatchSentimentShowStatsRunner(
        BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) {
        final List<TextDocumentInput> textDocumentInputs = Arrays.asList(
            new TextDocumentInput("0", "The hotel was dark and unclean. The restaurant had amazing gnocchi."),
            new TextDocumentInput("1", "The restaurant had amazing gnocchi. The hotel was dark and unclean.")
        );
        TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setShowStatistics(true);
        testRunner.accept(textDocumentInputs, options);
    }

    // Shared sentiment corpus used by every sentiment runner above.
    static List<String> getSentimentInput() {
        return Arrays.asList("The hotel was dark and unclean. The restaurant had amazing gnocchi.",
            "The restaurant had amazing gnocchi. The hotel was dark and unclean.");
    }

    // NOTE(review): the playback-mode literal appears truncated in this view (everything after
    // "http:" is missing) — restore the full playback endpoint URL before compiling.
    String getEndpoint() {
        return interceptorManager.isPlaybackMode() ? "http: : Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_ENDPOINT");
    }

    /**
     * Helper method to verify batch result.
     *
     * @param actualResult DocumentResultCollection<> returned by the API.
     * @param expectedResult the expected DocumentResultCollection<> to compare against.
     * @param testApi the API to test.
*/ <T> void validateBatchResult(DocumentResultCollection<T> actualResult, DocumentResultCollection<T> expectedResult, TestEndpoint testApi) { assertEquals(expectedResult.getModelVersion(), actualResult.getModelVersion()); if (this.showStatistics) { validateBatchStatistics(expectedResult.getStatistics(), actualResult.getStatistics()); } validateDocuments(expectedResult, actualResult, testApi); } /** * Helper method to verify documents returned in a batch request. * * @param expectedResult the expected result collection.. * @param actualResult the actual result collection returned by the API. * @param testApi the API to test. */ private <T> void validateDocuments(DocumentResultCollection<T> expectedResult, DocumentResultCollection<T> actualResult, TestEndpoint testApi) { switch (testApi) { case LANGUAGE: final List<DetectLanguageResult> detectLanguageResults = expectedResult.stream() .filter(element -> element instanceof DetectLanguageResult) .map(element -> (DetectLanguageResult) element) .collect(Collectors.toList()); final List<DetectLanguageResult> actualDetectLanguageResults = actualResult.stream() .filter(element -> element instanceof DetectLanguageResult) .map(element -> (DetectLanguageResult) element) .collect(Collectors.toList()); assertEquals(detectLanguageResults.size(), actualDetectLanguageResults.size()); actualDetectLanguageResults.forEach(actualItem -> { List<DetectLanguageResult> expectedItems = detectLanguageResults.stream().filter( expectedEachItem -> actualItem.getId().equals(expectedEachItem.getId())) .collect(Collectors.toList()); assertEquals(expectedItems.size(), 1); DetectLanguageResult expectedItem = expectedItems.get(0); if (actualItem.getError() == null) { if (this.showStatistics) { validateDocumentStatistics(expectedItem.getStatistics(), actualItem.getStatistics()); } validatePrimaryLanguage(expectedItem.getPrimaryLanguage(), actualItem.getPrimaryLanguage()); validateDetectedLanguages(expectedItem.getDetectedLanguages(), 
actualItem.getDetectedLanguages()); } }); break; case NAMED_ENTITY: final List<RecognizeEntitiesResult> recognizeEntitiesResults = expectedResult.stream() .filter(element -> element instanceof RecognizeEntitiesResult) .map(element -> (RecognizeEntitiesResult) element) .collect(Collectors.toList()); final List<RecognizeEntitiesResult> actualRecognizeEntitiesResults = actualResult.stream() .filter(element -> element instanceof RecognizeEntitiesResult) .map(element -> (RecognizeEntitiesResult) element) .collect(Collectors.toList()); assertEquals(recognizeEntitiesResults.size(), actualRecognizeEntitiesResults.size()); actualRecognizeEntitiesResults.forEach(actualItem -> { List<RecognizeEntitiesResult> expectedItems = recognizeEntitiesResults.stream().filter( expectedEachItem -> actualItem.getId().equals(expectedEachItem.getId())).collect( Collectors.toList() ); assertEquals(expectedItems.size(), 1); RecognizeEntitiesResult expectedItem = expectedItems.get(0); if (actualItem.getError() == null) { if (this.showStatistics) { validateDocumentStatistics(expectedItem.getStatistics(), actualItem.getStatistics()); } validateNamedEntities(expectedItem.getNamedEntities(), actualItem.getNamedEntities()); } }); break; case LINKED_ENTITY: final List<RecognizeLinkedEntitiesResult> recognizeLinkedEntitiesResults = expectedResult.stream() .filter(element -> element instanceof RecognizeLinkedEntitiesResult) .map(element -> (RecognizeLinkedEntitiesResult) element) .collect(Collectors.toList()); final List<RecognizeLinkedEntitiesResult> actualRecognizeLinkedEntitiesResults = actualResult.stream() .filter(element -> element instanceof RecognizeLinkedEntitiesResult) .map(element -> (RecognizeLinkedEntitiesResult) element) .collect(Collectors.toList()); assertEquals(recognizeLinkedEntitiesResults.size(), actualRecognizeLinkedEntitiesResults.size()); actualRecognizeLinkedEntitiesResults.forEach(actualItem -> { List<RecognizeLinkedEntitiesResult> expectedItems = 
recognizeLinkedEntitiesResults.stream().filter( expectedEachItem -> actualItem.getId().equals(expectedEachItem.getId())) .collect(Collectors.toList()); assertEquals(expectedItems.size(), 1); RecognizeLinkedEntitiesResult expectedItem = expectedItems.get(0); if (actualItem.getError() == null) { if (this.showStatistics) { validateDocumentStatistics(expectedItem.getStatistics(), actualItem.getStatistics()); } validateLinkedEntities(expectedItem.getLinkedEntities(), actualItem.getLinkedEntities()); } }); break; case KEY_PHRASES: final List<ExtractKeyPhraseResult> extractKeyPhraseResults = expectedResult.stream() .filter(element -> element instanceof ExtractKeyPhraseResult) .map(element -> (ExtractKeyPhraseResult) element) .collect(Collectors.toList()); final List<ExtractKeyPhraseResult> actualExtractKeyPhraseResults = actualResult.stream() .filter(element -> element instanceof ExtractKeyPhraseResult) .map(element -> (ExtractKeyPhraseResult) element) .collect(Collectors.toList()); assertEquals(extractKeyPhraseResults.size(), actualExtractKeyPhraseResults.size()); actualExtractKeyPhraseResults.forEach(actualItem -> { List<ExtractKeyPhraseResult> expectedItems = extractKeyPhraseResults.stream().filter( expectedEachItem -> actualItem.getId().equals(expectedEachItem.getId())) .collect(Collectors.toList()); assertEquals(expectedItems.size(), 1); ExtractKeyPhraseResult expectedItem = expectedItems.get(0); if (actualItem.getError() == null) { if (this.showStatistics) { validateDocumentStatistics(expectedItem.getStatistics(), actualItem.getStatistics()); } validateKeyPhrases(expectedItem.getKeyPhrases(), actualItem.getKeyPhrases()); } }); break; case SENTIMENT: final List<AnalyzeSentimentResult> expectedSentimentResults = expectedResult.stream() .filter(element -> element instanceof AnalyzeSentimentResult) .map(element -> (AnalyzeSentimentResult) element) .collect(Collectors.toList()); final List<AnalyzeSentimentResult> actualSentimentResults = actualResult.stream() 
.filter(element -> element instanceof AnalyzeSentimentResult) .map(element -> (AnalyzeSentimentResult) element) .collect(Collectors.toList()); expectedSentimentResults.sort(Comparator.comparing(AnalyzeSentimentResult::getId)); actualSentimentResults.sort(Comparator.comparing(AnalyzeSentimentResult::getId)); final int actualSize = actualSentimentResults.size(); final int expectedSize = expectedSentimentResults.size(); assertEquals(expectedSize, actualSize); for (int i = 0; i < actualSize; i++) { final AnalyzeSentimentResult actualSentimentResult = actualSentimentResults.get(i); final AnalyzeSentimentResult expectedSentimentResult = expectedSentimentResults.get(i); if (actualSentimentResult.getError() == null) { if (this.showStatistics) { validateDocumentStatistics(expectedSentimentResult.getStatistics(), actualSentimentResult.getStatistics()); } validateAnalysedSentiment(expectedSentimentResult.getDocumentSentiment(), actualSentimentResult.getDocumentSentiment()); validateAnalysedSentenceSentiment(expectedSentimentResult.getSentenceSentiments(), actualSentimentResult.getSentenceSentiments()); } else { validateErrorDocument(actualSentimentResult.getError(), actualSentimentResult.getError()); } } break; default: throw new IllegalArgumentException(String.format("Unsupported testApi : '%s'.", testApi)); } } /** * Helper method to verify TextBatchStatistics. * * @param expectedStatistics the expected value for TextBatchStatistics. * @param actualStatistics the value returned by API. 
     */
    private static void validateBatchStatistics(TextDocumentBatchStatistics expectedStatistics,
        TextDocumentBatchStatistics actualStatistics) {
        // Every batch-level counter must match exactly.
        assertEquals(expectedStatistics.getDocumentCount(), actualStatistics.getDocumentCount());
        assertEquals(expectedStatistics.getErroneousDocumentCount(), actualStatistics.getErroneousDocumentCount());
        assertEquals(expectedStatistics.getValidDocumentCount(), actualStatistics.getValidDocumentCount());
        assertEquals(expectedStatistics.getTransactionCount(), actualStatistics.getTransactionCount());
    }

    /**
     * Helper method to verify TextDocumentStatistics.
     *
     * @param expected the expected value for TextDocumentStatistics.
     * @param actual the value returned by API.
     */
    private static void validateDocumentStatistics(TextDocumentStatistics expected, TextDocumentStatistics actual) {
        assertEquals(expected.getCharacterCount(), actual.getCharacterCount());
        assertEquals(expected.getTransactionCount(), actual.getTransactionCount());
    }

    /**
     * Helper method to verify LinkedEntityMatches.
     *
     * @param expectedLinkedEntityMatches the expected value for LinkedEntityMatches.
     * @param actualLinkedEntityMatches the value returned by API.
     */
    private static void validateLinkedEntityMatches(List<LinkedEntityMatch> expectedLinkedEntityMatches,
        List<LinkedEntityMatch> actualLinkedEntityMatches) {
        assertEquals(expectedLinkedEntityMatches.size(), actualLinkedEntityMatches.size());
        // Sort both sides by text so matches are compared pairwise regardless of order.
        // NOTE(review): sorts the caller's lists in place — fails if an unmodifiable list is passed.
        expectedLinkedEntityMatches.sort(Comparator.comparing(LinkedEntityMatch::getText));
        actualLinkedEntityMatches.sort(Comparator.comparing(LinkedEntityMatch::getText));
        for (int i = 0; i < expectedLinkedEntityMatches.size(); i++) {
            LinkedEntityMatch expectedLinkedEntity = expectedLinkedEntityMatches.get(i);
            LinkedEntityMatch actualLinkedEntity = actualLinkedEntityMatches.get(i);
            assertEquals(expectedLinkedEntity.getLength(), actualLinkedEntity.getLength());
            assertEquals(expectedLinkedEntity.getOffset(), actualLinkedEntity.getOffset());
            assertEquals(expectedLinkedEntity.getScore(), actualLinkedEntity.getScore());
            assertEquals(expectedLinkedEntity.getText(), actualLinkedEntity.getText());
        }
    }

    /**
     * Helper method to verify the error document.
     *
     * @param expectedError the Error returned from the service.
     * @param actualError the Error returned from the API.
     */
    static void validateErrorDocument(TextAnalyticsError expectedError, TextAnalyticsError actualError) {
        assertEquals(expectedError.getCode(), actualError.getCode());
        assertEquals(expectedError.getMessage(), actualError.getMessage());
        assertEquals(expectedError.getTarget(), actualError.getTarget());
    }

    /**
     * Helper method to validate a single detected language.
     *
     * @param expectedLanguage detectedLanguage returned by the service.
     * @param actualLanguage detectedLanguage returned by the API.
     */
    static void validatePrimaryLanguage(DetectedLanguage expectedLanguage, DetectedLanguage actualLanguage) {
        assertEquals(expectedLanguage.getIso6391Name(), actualLanguage.getIso6391Name());
        assertEquals(expectedLanguage.getName(), actualLanguage.getName());
        assertEquals(expectedLanguage.getScore(), actualLanguage.getScore());
    }

    /**
     * Helper method to validate the list of detected languages.
     *
     * @param expectedLanguageList detectedLanguages returned by the service.
     * @param actualLanguageList detectedLanguages returned by the API.
     */
    static void validateDetectedLanguages(List<DetectedLanguage> expectedLanguageList,
        List<DetectedLanguage> actualLanguageList) {
        assertEquals(expectedLanguageList.size(), actualLanguageList.size());
        // Order-insensitive comparison: sort both sides by language name first (in place).
        expectedLanguageList.sort(Comparator.comparing(DetectedLanguage::getName));
        actualLanguageList.sort(Comparator.comparing(DetectedLanguage::getName));
        for (int i = 0; i < expectedLanguageList.size(); i++) {
            DetectedLanguage expectedDetectedLanguage = expectedLanguageList.get(i);
            DetectedLanguage actualDetectedLanguage = actualLanguageList.get(i);
            validatePrimaryLanguage(expectedDetectedLanguage, actualDetectedLanguage);
        }
    }

    /**
     * Helper method to validate a single named entity.
     *
     * @param expectedNamedEntity namedEntity returned by the service.
     * @param actualNamedEntity namedEntity returned by the API.
     */
    static void validateNamedEntity(NamedEntity expectedNamedEntity, NamedEntity actualNamedEntity) {
        assertEquals(expectedNamedEntity.getLength(), actualNamedEntity.getLength());
        assertEquals(expectedNamedEntity.getOffset(), actualNamedEntity.getOffset());
        assertEquals(expectedNamedEntity.getScore(), actualNamedEntity.getScore());
        assertEquals(expectedNamedEntity.getSubtype(), actualNamedEntity.getSubtype());
        assertEquals(expectedNamedEntity.getText(), actualNamedEntity.getText());
        assertEquals(expectedNamedEntity.getType(), actualNamedEntity.getType());
    }

    /**
     * Helper method to validate a single linked entity.
     *
     * @param expectedLinkedEntity linkedEntity returned by the service.
     * @param actualLinkedEntity linkedEntity returned by the API.
     */
    static void validateLinkedEntity(LinkedEntity expectedLinkedEntity, LinkedEntity actualLinkedEntity) {
        assertEquals(expectedLinkedEntity.getName(), actualLinkedEntity.getName());
        assertEquals(expectedLinkedEntity.getDataSource(), actualLinkedEntity.getDataSource());
        assertEquals(expectedLinkedEntity.getLanguage(), actualLinkedEntity.getLanguage());
        assertEquals(expectedLinkedEntity.getUrl(), actualLinkedEntity.getUrl());
        assertEquals(expectedLinkedEntity.getId(), actualLinkedEntity.getId());
        validateLinkedEntityMatches(expectedLinkedEntity.getLinkedEntityMatches(), actualLinkedEntity.getLinkedEntityMatches());
    }

    /**
     * Helper method to validate a single key phrase.
     *
     * @param expectedKeyPhrases key phrases returned by the service.
     * @param actualKeyPhrases key phrases returned by the API.
     */
    void validateKeyPhrases(List<String> expectedKeyPhrases, List<String> actualKeyPhrases) {
        assertEquals(expectedKeyPhrases.size(), actualKeyPhrases.size());
        // Order-insensitive comparison (sorts the caller's lists in place).
        Collections.sort(expectedKeyPhrases);
        Collections.sort(actualKeyPhrases);
        for (int i = 0; i < expectedKeyPhrases.size(); i++) {
            assertTrue(expectedKeyPhrases.get(i).equals(actualKeyPhrases.get(i)));
        }
    }

    /**
     * Helper method to validate the list of named entities.
     *
     * @param expectedNamedEntityList namedEntities returned by the service.
     * @param actualNamedEntityList namedEntities returned by the API.
     */
    static void validateNamedEntities(List<NamedEntity> expectedNamedEntityList,
        List<NamedEntity> actualNamedEntityList) {
        assertEquals(expectedNamedEntityList.size(), actualNamedEntityList.size());
        expectedNamedEntityList.sort(Comparator.comparing(NamedEntity::getText));
        actualNamedEntityList.sort(Comparator.comparing(NamedEntity::getText));
        for (int i = 0; i < expectedNamedEntityList.size(); i++) {
            NamedEntity expectedNamedEntity = expectedNamedEntityList.get(i);
            NamedEntity actualNamedEntity = actualNamedEntityList.get(i);
            validateNamedEntity(expectedNamedEntity, actualNamedEntity);
        }
    }

    /**
     * Helper method to validate the list of linked entities.
     *
     * @param expectedLinkedEntityList linkedEntities returned by the service.
     * @param actualLinkedEntityList linkedEntities returned by the API.
     */
    static void validateLinkedEntities(List<LinkedEntity> expectedLinkedEntityList,
        List<LinkedEntity> actualLinkedEntityList) {
        assertEquals(expectedLinkedEntityList.size(), actualLinkedEntityList.size());
        expectedLinkedEntityList.sort(Comparator.comparing(LinkedEntity::getName));
        actualLinkedEntityList.sort(Comparator.comparing(LinkedEntity::getName));
        for (int i = 0; i < expectedLinkedEntityList.size(); i++) {
            LinkedEntity expectedLinkedEntity = expectedLinkedEntityList.get(i);
            LinkedEntity actualLinkedEntity = actualLinkedEntityList.get(i);
            validateLinkedEntity(expectedLinkedEntity, actualLinkedEntity);
        }
    }

    /**
     * Helper method to validate the list of sentence sentiment. Can't really validate score numbers because it
     * frequently changed by background model computation.
     *
     * @param expectedSentimentList a list of analyzed sentence sentiment returned by the service.
     * @param actualSentimentList a list of analyzed sentence sentiment returned by the API.
     */
    static void validateAnalysedSentenceSentiment(List<TextSentiment> expectedSentimentList,
        List<TextSentiment> actualSentimentList) {
        assertEquals(expectedSentimentList.size(), actualSentimentList.size());
        for (int i = 0; i < expectedSentimentList.size(); i++) {
            validateAnalysedSentiment(expectedSentimentList.get(i), actualSentimentList.get(i));
        }
    }

    /**
     * Helper method to validate one pair of analysed sentiments. Can't really validate score numbers because it
     * frequently changed by background model computation.
     *
     * @param expectedSentiment analyzed document sentiment returned by the service.
     * @param actualSentiment analyzed document sentiment returned by the API.
     */
    static void validateAnalysedSentiment(TextSentiment expectedSentiment, TextSentiment actualSentiment) {
        assertEquals(expectedSentiment.getLength(), actualSentiment.getLength());
        assertEquals(expectedSentiment.getOffset(), actualSentiment.getOffset());
        assertEquals(expectedSentiment.getTextSentimentClass(), actualSentiment.getTextSentimentClass());
        // Scores drift between model runs, so only check presence (> 0), not exact values.
        assertEquals(expectedSentiment.getNegativeScore() > 0, actualSentiment.getNegativeScore() > 0);
        assertEquals(expectedSentiment.getNeutralScore() > 0, actualSentiment.getNeutralScore() > 0);
        assertEquals(expectedSentiment.getPositiveScore() > 0, actualSentiment.getPositiveScore() > 0);
    }

    /**
     * Asserts that the given throwable is an HTTP exception of the expected type and status code.
     *
     * @param exception the throwable to inspect.
     * @param expectedExceptionType the expected exception class.
     * @param expectedStatusCode the expected HTTP status code.
     */
    static void assertRestException(Throwable exception,
        Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
        assertEquals(expectedExceptionType, exception.getClass());
        assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode());
    }

    /**
     * Runs the given command and asserts that it throws an HTTP exception of the expected type
     * and status code.
     *
     * @param exceptionThrower command expected to throw.
     * @param expectedExceptionType the expected exception class.
     * @param expectedStatusCode the expected HTTP status code.
     */
    static void assertRestException(Runnable exceptionThrower,
        Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
        try {
            exceptionThrower.run();
            fail();
        } catch (Throwable ex) {
            assertRestException(ex, expectedExceptionType, expectedStatusCode);
        }
    }

    /**
     * Helper method to get the expected Batch Detected Languages
     */
    static DocumentResultCollection<DetectLanguageResult> getExpectedBatchDetectedLanguages() {
        DetectedLanguage detectedLanguage1 = new DetectedLanguage("English", "en", 1.0);
        DetectedLanguage detectedLanguage2 = new DetectedLanguage("Spanish", "es", 1.0);
        DetectedLanguage detectedLanguage3 = new DetectedLanguage("(Unknown)", "(Unknown)", 0.0);
        List<DetectedLanguage> detectedLanguageList1 = Collections.singletonList(detectedLanguage1);
        List<DetectedLanguage> detectedLanguageList2 = Collections.singletonList(detectedLanguage2);
        List<DetectedLanguage> detectedLanguageList3 = Collections.singletonList(detectedLanguage3);
        TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics(26, 1);
        TextDocumentStatistics textDocumentStatistics2 = new TextDocumentStatistics(39, 1);
        TextDocumentStatistics textDocumentStatistics3 = new TextDocumentStatistics(6, 1);
        DetectLanguageResult detectLanguageResult1 = new DetectLanguageResult("0", textDocumentStatistics1, null, detectedLanguage1, detectedLanguageList1);
        DetectLanguageResult detectLanguageResult2 = new DetectLanguageResult("1", textDocumentStatistics2, null, detectedLanguage2, detectedLanguageList2);
        DetectLanguageResult detectLanguageResult3 = new DetectLanguageResult("2", textDocumentStatistics3, null, detectedLanguage3, detectedLanguageList3);
        TextDocumentBatchStatistics textDocumentBatchStatistics = new TextDocumentBatchStatistics(3, 0, 3, 3);
        List<DetectLanguageResult> detectLanguageResultList = Arrays.asList(detectLanguageResult1, detectLanguageResult2, detectLanguageResult3);
        return new DocumentResultCollection<>(detectLanguageResultList, MODEL_VERSION, textDocumentBatchStatistics);
    }

    /** Helper method to get the expected Batch Named Entities. */
    static
DocumentResultCollection<RecognizeEntitiesResult> getExpectedBatchNamedEntities() { NamedEntity namedEntity1 = new NamedEntity("Seattle", "Location", null, 26, 7, 0.80624294281005859); NamedEntity namedEntity2 = new NamedEntity("last week", "DateTime", "DateRange", 34, 9, 0.8); NamedEntity namedEntity3 = new NamedEntity("Microsoft", "Organization", null, 10, 9, 0.99983596801757812); List<NamedEntity> namedEntityList1 = Arrays.asList(namedEntity1, namedEntity2); List<NamedEntity> namedEntityList2 = Collections.singletonList(namedEntity3); TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics(44, 1); TextDocumentStatistics textDocumentStatistics2 = new TextDocumentStatistics(20, 1); RecognizeEntitiesResult recognizeEntitiesResult1 = new RecognizeEntitiesResult("0", textDocumentStatistics1, null, namedEntityList1); RecognizeEntitiesResult recognizeEntitiesResult2 = new RecognizeEntitiesResult("1", textDocumentStatistics2, null, namedEntityList2); TextDocumentBatchStatistics textDocumentBatchStatistics = new TextDocumentBatchStatistics(2, 0, 2, 2); List<RecognizeEntitiesResult> recognizeEntitiesResultList = Arrays.asList(recognizeEntitiesResult1, recognizeEntitiesResult2); return new DocumentResultCollection<>(recognizeEntitiesResultList, MODEL_VERSION, textDocumentBatchStatistics); } static DocumentResultCollection<RecognizePiiEntitiesResult> getExpectedBatchPiiEntities() { NamedEntity namedEntity1 = new NamedEntity("859-98-0987", "U.S. 
Social Security Number (SSN)", "", 28, 11, 0.65); NamedEntity namedEntity2 = new NamedEntity("111000025", "ABA Routing Number", "", 18, 9, 0.75); List<NamedEntity> namedEntityList1 = Collections.singletonList(namedEntity1); List<NamedEntity> namedEntityList2 = Collections.singletonList(namedEntity2); TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics(67, 1); TextDocumentStatistics textDocumentStatistics2 = new TextDocumentStatistics(105, 1); RecognizePiiEntitiesResult recognizeEntitiesResult1 = new RecognizePiiEntitiesResult("0", textDocumentStatistics1, null, namedEntityList1); RecognizePiiEntitiesResult recognizeEntitiesResult2 = new RecognizePiiEntitiesResult("1", textDocumentStatistics2, null, namedEntityList2); TextDocumentBatchStatistics textDocumentBatchStatistics = new TextDocumentBatchStatistics(2, 0, 2, 2); List<RecognizePiiEntitiesResult> recognizeEntitiesResultList = Arrays.asList(recognizeEntitiesResult1, recognizeEntitiesResult2); return new DocumentResultCollection<>(recognizeEntitiesResultList, MODEL_VERSION, textDocumentBatchStatistics); } static DocumentResultCollection<RecognizeLinkedEntitiesResult> getExpectedBatchLinkedEntities() { LinkedEntityMatch linkedEntityMatch1 = new LinkedEntityMatch("Seattle", 0.11472424095537814, 7, 26); LinkedEntityMatch linkedEntityMatch2 = new LinkedEntityMatch("Microsoft", 0.18693659716732069, 9, 10); LinkedEntity linkedEntity1 = new LinkedEntity( "Seattle", Collections.singletonList(linkedEntityMatch1), "en", "Seattle", "https: "Wikipedia"); LinkedEntity linkedEntity2 = new LinkedEntity( "Microsoft", Collections.singletonList(linkedEntityMatch2), "en", "Microsoft", "https: "Wikipedia"); List<LinkedEntity> linkedEntityList1 = Collections.singletonList(linkedEntity1); List<LinkedEntity> linkedEntityList2 = Collections.singletonList(linkedEntity2); TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics(44, 1); TextDocumentStatistics textDocumentStatistics2 = new 
TextDocumentStatistics(20, 1); RecognizeLinkedEntitiesResult recognizeLinkedEntitiesResult1 = new RecognizeLinkedEntitiesResult("0", textDocumentStatistics1, null, linkedEntityList1); RecognizeLinkedEntitiesResult recognizeLinkedEntitiesResult2 = new RecognizeLinkedEntitiesResult("1", textDocumentStatistics2, null, linkedEntityList2); TextDocumentBatchStatistics textDocumentBatchStatistics = new TextDocumentBatchStatistics(2, 0, 2, 2); List<RecognizeLinkedEntitiesResult> recognizeLinkedEntitiesResultList = Arrays.asList(recognizeLinkedEntitiesResult1, recognizeLinkedEntitiesResult2); return new DocumentResultCollection<>(recognizeLinkedEntitiesResultList, MODEL_VERSION, textDocumentBatchStatistics); } static DocumentResultCollection<ExtractKeyPhraseResult> getExpectedBatchKeyPhrases() { List<String> keyPhrasesList1 = Arrays.asList("input text", "world"); List<String> keyPhrasesList2 = Arrays.asList("monde"); TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics(49, 1); TextDocumentStatistics textDocumentStatistics2 = new TextDocumentStatistics(21, 1); ExtractKeyPhraseResult extractKeyPhraseResult1 = new ExtractKeyPhraseResult("0", textDocumentStatistics1, null, keyPhrasesList1); ExtractKeyPhraseResult extractKeyPhraseResult2 = new ExtractKeyPhraseResult("1", textDocumentStatistics2, null, keyPhrasesList2); TextDocumentBatchStatistics textDocumentBatchStatistics = new TextDocumentBatchStatistics(2, 0, 2, 2); List<ExtractKeyPhraseResult> extractKeyPhraseResultList = Arrays.asList(extractKeyPhraseResult1, extractKeyPhraseResult2); return new DocumentResultCollection<>(extractKeyPhraseResultList, MODEL_VERSION, textDocumentBatchStatistics); } static DocumentResultCollection<AnalyzeSentimentResult> getExpectedBatchTextSentiment() { final TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics(67, 1); final TextDocumentStatistics textDocumentStatistics2 = new TextDocumentStatistics(67, 1); final TextSentiment 
expectedDocumentSentiment = new TextSentiment(TextSentimentClass.MIXED, 0.1, 0.5, 0.4, 66, 0); final AnalyzeSentimentResult analyzeSentimentResult1 = new AnalyzeSentimentResult("0", textDocumentStatistics1, null, expectedDocumentSentiment, Arrays.asList( new TextSentiment(TextSentimentClass.NEGATIVE, 0.99, 0.005, 0.005, 31, 0), new TextSentiment(TextSentimentClass.POSITIVE, 0.005, 0.005, 0.99, 35, 32) )); final AnalyzeSentimentResult analyzeSentimentResult2 = new AnalyzeSentimentResult("1", textDocumentStatistics2, null, expectedDocumentSentiment, Arrays.asList( new TextSentiment(TextSentimentClass.POSITIVE, 0.005, 0.005, 0.99, 35, 0), new TextSentiment(TextSentimentClass.NEGATIVE, 0.99, 0.005, 0.005, 31, 36) )); return new DocumentResultCollection<>(Arrays.asList(analyzeSentimentResult1, analyzeSentimentResult2), MODEL_VERSION, new TextDocumentBatchStatistics(2, 0, 2, 2)); } }
`new ArrayList<>(Arrays.asList(...))` makes a redundant mutable copy; since these expected-value lists are never mutated, `Arrays.asList(...)` should be used directly (or `Collections.singletonList(...)` for a single element). A find-and-replace across the file may be needed to update every occurrence.
public void detectSingleTextLanguage() { DetectedLanguage primaryLanguage = new DetectedLanguage().setName("English").setIso6391Name("en").setScore(1.0); List<DetectedLanguage> expectedLanguageList = new ArrayList<>(Arrays.asList(primaryLanguage)); StepVerifier.create(client.detectLanguage("This is a test English Text")) .assertNext(response -> validateDetectedLanguages(expectedLanguageList, response.getDetectedLanguages())) .verifyComplete(); }
List<DetectedLanguage> expectedLanguageList = new ArrayList<>(Arrays.asList(primaryLanguage));
public void detectSingleTextLanguage() { DetectedLanguage primaryLanguage = new DetectedLanguage("English", "en", 1.0); List<DetectedLanguage> expectedLanguageList = Arrays.asList(primaryLanguage); StepVerifier.create(client.detectLanguage("This is a test English Text")) .assertNext(response -> validateDetectedLanguages(expectedLanguageList, response.getDetectedLanguages())) .verifyComplete(); }
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase {
    // Async client under test; built per-test in beforeTest().
    private TextAnalyticsAsyncClient client;

    @Override
    protected void beforeTest() {
        client = clientSetup(httpPipeline -> new TextAnalyticsClientBuilder()
            .endpoint(getEndPoint())
            .pipeline(httpPipeline)
            .buildAsyncClient());
    }

    /**
     * Verify that we can get statistics on the collection result when given a batch input with options.
     */
    @Test
    public void detectLanguagesBatchInputShowStatistics() {
        detectLanguageShowStatisticsRunner((inputs, options) -> {
            StepVerifier.create(client.detectBatchLanguagesWithResponse(inputs, options))
                .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchDetectedLanguages(), "Language"))
                .verifyComplete();
        });
    }

    /**
     * Test Detect batch input languages.
     */
    @Test
    public void detectLanguagesBatchInput() {
        detectLanguageRunner((inputs) -> {
            StepVerifier.create(client.detectBatchLanguages(inputs))
                .assertNext(response -> validateBatchResult(response, getExpectedBatchDetectedLanguages(), "Language"))
                .verifyComplete();
        });
    }

    /**
     * Test Detect batch languages for List of String input with country Hint.
     */
    @Test
    public void detectLanguagesBatchListCountryHint() {
        detectLanguagesCountryHintRunner((inputs, countryHint) -> {
            StepVerifier.create(client.detectLanguagesWithResponse(inputs, countryHint))
                .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchDetectedLanguages(), "Language"))
                .verifyComplete();
        });
    }

    /**
     * Test Detect batch languages for List of String input.
     */
    @Test
    public void detectLanguagesBatchStringInput() {
        detectLanguageStringInputRunner((inputs) -> {
            StepVerifier.create(client.detectLanguages(inputs))
                .assertNext(response -> validateBatchResult(response, getExpectedBatchDetectedLanguages(), "Language"))
                .verifyComplete();
        });
    }

    /**
     * Verifies that a single DetectLanguageResult is returned for a text input to detectLanguages.
     */
    @Test
    // NOTE(review): the method this @Test annotates is missing from this chunk of the file — confirm
    // against the original source before relying on this region.

    /**
     * Verifies that an error document is returned for a text input with invalid country hint.
     * <p>
     * TODO: update error Model.
     */
    @Test
    public void detectLanguageInvalidCountryHint() {
        Error expectedError = new Error().setCode("InvalidArgument").setMessage("Invalid Country Hint.");
        StepVerifier.create(client.detectLanguageWithResponse("Este es un document escrito en Español.", "en"))
            .assertNext(response -> validateErrorDocument(expectedError, response.getValue().getError()))
            .verifyComplete();
    }

    /**
     * Verifies that an error document is returned for a empty text input.
     */
    @Test
    public void detectLanguageEmptyText() {
        Error expectedError = new Error().setCode("InvalidArgument").setMessage("Invalid document in request.");
        StepVerifier.create(client.detectLanguage(""))
            .assertNext(response -> validateErrorDocument(expectedError, response.getError()))
            .verifyComplete();
    }

    /**
     * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed.
     */
    @Test
    public void detectLanguageFaultyText() {
        // NOTE(review): the faulty-text literal was truncated by extraction (it began "!@" and likely
        // contained a '#'); the statement is reproduced verbatim below.
        StepVerifier.create(client.detectLanguage("!@ .assertNext(response -> assertEquals(response.getPrimaryLanguage().getIso6391Name(), "(Unknown)")) .verifyComplete(); }

    /**
     * Verifies that a Bad request exception is returned for input documents with same ids.
     */
    @Test
    public void detectLanguageDuplicateIdInput() {
        detectLanguageDuplicateIdRunner((inputs, options) -> {
            StepVerifier.create(client.detectBatchLanguagesWithResponse(inputs, options, Context.NONE))
                .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 400));
        });
    }

    @Test
    public void recognizeEntitiesForSimpleInput() {
        // NOTE(review): only sets up a local and asserts nothing — placeholder pending implementation.
        DetectedLanguage primaryLanguage = new DetectedLanguage().setName("English").setIso6391Name("en").setScore(1.0);
    }

    // Placeholder tests below are pending implementation.
    @Test
    public void recognizeEntitiesForEmptyText() {
    }

    @Test
    public void recognizeEntitiesForFaultyText() {
    }

    @Test
    public void recognizeEntitiesForBatchInput() {
    }

    @Test
    public void recognizeEntitiesForBatchInputShowStatistics() {
    }

    @Test
    public void recognizeEntitiesForBatchStringInput() {
    }

    @Test
    public void recognizeEntitiesForBatchListCountryHint() {
    }
}
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase {
    // Async client under test; built per-test in beforeTest().
    private TextAnalyticsAsyncClient client;

    @Override
    protected void beforeTest() {
        client = clientSetup(httpPipeline -> new TextAnalyticsClientBuilder()
            .endpoint(getEndpoint())
            .pipeline(httpPipeline)
            .buildAsyncClient());
    }

    /**
     * Verify that we can get statistics on the collection result when given a batch input with options.
     */
    @Test
    public void detectLanguagesBatchInputShowStatistics() {
        detectLanguageShowStatisticsRunner((inputs, options) -> {
            StepVerifier.create(client.detectBatchLanguagesWithResponse(inputs, options))
                .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchDetectedLanguages(), TestEndpoint.LANGUAGE))
                .verifyComplete();
        });
    }

    /**
     * Test Detect batch input languages.
     */
    @Test
    public void detectLanguagesBatchInput() {
        detectLanguageRunner((inputs) -> {
            StepVerifier.create(client.detectBatchLanguages(inputs))
                .assertNext(response -> validateBatchResult(response, getExpectedBatchDetectedLanguages(), TestEndpoint.LANGUAGE))
                .verifyComplete();
        });
    }

    /**
     * Test Detect batch languages for List of String input with country Hint.
     */
    @Test
    public void detectLanguagesBatchListCountryHint() {
        detectLanguagesCountryHintRunner((inputs, countryHint) -> {
            StepVerifier.create(client.detectLanguagesWithResponse(inputs, countryHint))
                .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchDetectedLanguages(), TestEndpoint.LANGUAGE))
                .verifyComplete();
        });
    }

    /**
     * Test Detect batch languages for List of String input.
     */
    @Test
    public void detectLanguagesBatchStringInput() {
        detectLanguageStringInputRunner((inputs) -> {
            StepVerifier.create(client.detectLanguages(inputs))
                .assertNext(response -> validateBatchResult(response, getExpectedBatchDetectedLanguages(), TestEndpoint.LANGUAGE))
                .verifyComplete();
        });
    }

    /**
     * Verifies that a single DetectLanguageResult is returned for a text input to detectLanguages.
     */
    @Test
    // NOTE(review): the method this @Test annotates is missing from this chunk of the file — confirm
    // against the original source before relying on this region.

    /**
     * Verifies that an error document is returned for a text input with invalid country hint.
     * <p>
     * TODO: update error Model.
     */
    @Test
    public void detectLanguageInvalidCountryHint() {
        TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid Country Hint.", null, null);
        StepVerifier.create(client.detectLanguageWithResponse("Este es un document escrito en Español.", "en"))
            .assertNext(response -> validateErrorDocument(expectedError, response.getValue().getError()))
            .verifyComplete();
    }

    /**
     * Verifies that an error document is returned for a empty text input.
     */
    @Test
    public void detectLanguageEmptyText() {
        TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null);
        StepVerifier.create(client.detectLanguage(""))
            .assertNext(response -> validateErrorDocument(expectedError, response.getError()))
            .verifyComplete();
    }

    /**
     * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed.
     */
    @Test
    public void detectLanguageFaultyText() {
        // NOTE(review): the faulty-text literal was truncated by extraction; reproduced verbatim.
        StepVerifier.create(client.detectLanguage("!@ .assertNext(response -> assertEquals(response.getPrimaryLanguage().getIso6391Name(), "(Unknown)")) .verifyComplete(); }

    /**
     * Verifies that a Bad request exception is returned for input documents with same ids.
     */
    @Test
    public void detectLanguageDuplicateIdInput() {
        detectLanguageDuplicateIdRunner((inputs, options) -> {
            StepVerifier.create(client.detectBatchLanguagesWithResponse(inputs, options, Context.NONE))
                .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 400));
        });
    }

    // Verifies recognized named entities for a single text input.
    @Test
    public void recognizeEntitiesForTextInput() {
        NamedEntity namedEntity1 = new NamedEntity("Seattle", "Location", null, 26, 7, 0.80624294281005859);
        NamedEntity namedEntity2 = new NamedEntity("last week", "DateTime", "DateRange", 34, 9, 0.8);
        RecognizeEntitiesResult recognizeEntitiesResultList = new RecognizeEntitiesResult("0", null, null, Arrays.asList(namedEntity1, namedEntity2));
        StepVerifier.create(client.recognizeEntities("I had a wonderful trip to Seattle last week."))
            .assertNext(response -> validateNamedEntities(recognizeEntitiesResultList.getNamedEntities(), response.getNamedEntities()))
            .verifyComplete();
    }

    // Verifies an error document is returned for an empty text input.
    @Test
    public void recognizeEntitiesForEmptyText() {
        TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null);
        StepVerifier.create(client.recognizeEntities(""))
            .assertNext(response -> validateErrorDocument(expectedError, response.getError()))
            .verifyComplete();
    }

    // NOTE(review): annotated @Override (presumably overriding an abstract base test), not @Test — confirm intended.
    @Override
    public void recognizeEntitiesForFaultyText() {
        // NOTE(review): literal truncated by extraction; reproduced verbatim.
        StepVerifier.create(client.recognizeEntities("!@ .assertNext(response -> assertEquals(response.getNamedEntities().size(), 0)) .verifyComplete(); }

    @Test
    public void recognizeEntitiesForBatchInput() {
        recognizeBatchNamedEntityRunner((inputs) -> {
            StepVerifier.create(client.recognizeBatchEntities(inputs))
                .assertNext(response -> validateBatchResult(response, getExpectedBatchNamedEntities(), TestEndpoint.NAMED_ENTITY))
                .verifyComplete();
        });
    }

    @Test
    public void recognizeEntitiesForBatchInputShowStatistics() {
        recognizeBatchNamedEntitiesShowStatsRunner((inputs, options) -> {
            StepVerifier.create(client.recognizeBatchEntitiesWithResponse(inputs, options))
                .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchNamedEntities(), TestEndpoint.NAMED_ENTITY))
                .verifyComplete();
        });
    }

    @Test
    public void recognizeEntitiesForBatchStringInput() {
        recognizeNamedEntityStringInputRunner((inputs) -> {
            StepVerifier.create(client.recognizeEntities(inputs))
                .assertNext(response -> validateBatchResult(response, getExpectedBatchNamedEntities(), TestEndpoint.NAMED_ENTITY))
                .verifyComplete();
        });
    }

    @Test
    public void recognizeEntitiesForListLanguageHint() {
        recognizeNamedEntitiesLanguageHintRunner((inputs, language) -> {
            StepVerifier.create(client.recognizeEntitiesWithResponse(inputs, language))
                .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchNamedEntities(), TestEndpoint.NAMED_ENTITY))
                .verifyComplete();
        });
    }

    @Test
    public void recognizeLinkedEntitiesForTextInput() {
        LinkedEntityMatch linkedEntityMatch1 = new LinkedEntityMatch("Seattle", 0.11472424095537814, 7, 26);
        // NOTE(review): the URL literal below was truncated by extraction, swallowing the end of the
        // constructor call; span reproduced verbatim.
        LinkedEntity linkedEntity1 = new LinkedEntity("Seattle", Collections.singletonList(linkedEntityMatch1), "en", "Seattle", "https: RecognizeLinkedEntitiesResult recognizeLinkedEntitiesResultList = new RecognizeLinkedEntitiesResult("0", null, null, Collections.singletonList(linkedEntity1));
        StepVerifier.create(client.recognizeLinkedEntities("I had a wonderful trip to Seattle last week."))
            .assertNext(response -> validateLinkedEntities(recognizeLinkedEntitiesResultList.getLinkedEntities(), response.getLinkedEntities()))
            .verifyComplete();
    }

    @Test
    public void recognizeLinkedEntitiesForEmptyText() {
        TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null);
        StepVerifier.create(client.recognizeLinkedEntities(""))
            .assertNext(response -> validateErrorDocument(expectedError, response.getError()))
            .verifyComplete();
    }

    @Test
    public void recognizeLinkedEntitiesForFaultyText() {
        // NOTE(review): literal truncated by extraction; reproduced verbatim.
        StepVerifier.create(client.recognizeLinkedEntities("!@ .assertNext(response -> assertEquals(response.getLinkedEntities().size(), 0)) .verifyComplete(); }

    @Test
    public void recognizeLinkedEntitiesForBatchInput() {
        recognizeBatchLinkedEntityRunner((inputs) -> {
            StepVerifier.create(client.recognizeBatchLinkedEntities(inputs))
                .assertNext(response -> validateBatchResult(response, getExpectedBatchLinkedEntities(), TestEndpoint.LINKED_ENTITY))
                .verifyComplete();
        });
    }

    @Test
    public void recognizeLinkedEntitiesForBatchInputShowStatistics() {
        recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) -> {
            StepVerifier.create(client.recognizeBatchLinkedEntitiesWithResponse(inputs, options))
                .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchLinkedEntities(), TestEndpoint.LINKED_ENTITY))
                .verifyComplete();
        });
    }

    @Test
    public void recognizeLinkedEntitiesForBatchStringInput() {
        recognizeLinkedStringInputRunner((inputs) -> {
            StepVerifier.create(client.recognizeLinkedEntities(inputs))
                .assertNext(response -> validateBatchResult(response, getExpectedBatchLinkedEntities(), TestEndpoint.LINKED_ENTITY))
                .verifyComplete();
        });
    }

    @Test
    public void recognizeLinkedEntitiesForListLanguageHint() {
        recognizeLinkedLanguageHintRunner((inputs, language) -> {
            StepVerifier.create(client.recognizeLinkedEntitiesWithResponse(inputs, language))
                // NOTE(review): TestEndpoint.NAMED_ENTITY looks inconsistent with the other linked-entity
                // tests (which use LINKED_ENTITY) — confirm whether this is intentional.
                .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchLinkedEntities(), TestEndpoint.NAMED_ENTITY))
                .verifyComplete();
        });
    }

    @Test
    public void recognizePiiEntitiesForTextInput() {
        NamedEntity namedEntity1 = new NamedEntity("859-98-0987", "U.S. Social Security Number (SSN)", "", 28, 11, 0.65);
        RecognizeEntitiesResult recognizeEntitiesResultList = new RecognizeEntitiesResult("0", null, null, Collections.singletonList(namedEntity1));
        StepVerifier.create(client.recognizePiiEntities("Microsoft employee with ssn 859-98-0987 is using our awesome API's."))
            .assertNext(response -> validateNamedEntities(recognizeEntitiesResultList.getNamedEntities(), response.getNamedEntities()))
            .verifyComplete();
    }

    @Test
    public void recognizePiiEntitiesForEmptyText() {
        TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null);
        StepVerifier.create(client.recognizePiiEntities(""))
            .assertNext(response -> validateErrorDocument(expectedError, response.getError()))
            .verifyComplete();
    }

    // NOTE(review): annotated @Override, not @Test — confirm intended.
    @Override
    public void recognizePiiEntitiesForFaultyText() {
        // NOTE(review): literal truncated by extraction; reproduced verbatim.
        StepVerifier.create(client.recognizePiiEntities("!@ .assertNext(response -> assertEquals(response.getNamedEntities().size(), 0)) .verifyComplete(); }

    @Test
    public void recognizePiiEntitiesForBatchInput() {
        recognizeBatchPiiRunner((inputs) -> {
            StepVerifier.create(client.recognizeBatchPiiEntities(inputs))
                .assertNext(response -> validateBatchResult(response, getExpectedBatchPiiEntities(), TestEndpoint.NAMED_ENTITY))
                .verifyComplete();
        });
    }

    @Test
    public void recognizePiiEntitiesForBatchInputShowStatistics() {
        recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) -> {
            StepVerifier.create(client.recognizeBatchPiiEntitiesWithResponse(inputs, options))
                .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchPiiEntities(), TestEndpoint.NAMED_ENTITY))
                .verifyComplete();
        });
    }

    @Test
    public void recognizePiiEntitiesForBatchStringInput() {
        recognizePiiStringInputRunner((inputs) -> {
            StepVerifier.create(client.recognizePiiEntities(inputs))
                .assertNext(response -> validateBatchResult(response, getExpectedBatchPiiEntities(), TestEndpoint.NAMED_ENTITY))
                .verifyComplete();
        });
    }

    @Test
    public void recognizePiiEntitiesForListLanguageHint() {
        recognizePiiLanguageHintRunner((inputs, language) -> {
            StepVerifier.create(client.recognizePiiEntitiesWithResponse(inputs, language))
                .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchPiiEntities(), TestEndpoint.NAMED_ENTITY))
                .verifyComplete();
        });
    }

    @Test
    public void extractKeyPhrasesForTextInput() {
        List<String> keyPhrasesList1 = Arrays.asList("monde");
        StepVerifier.create(client.extractKeyPhrasesWithResponse("Bonjour tout le monde.", "fr"))
            .assertNext(response -> validateKeyPhrases(keyPhrasesList1, response.getValue().getKeyPhrases()))
            .verifyComplete();
    }

    @Test
    public void extractKeyPhrasesForEmptyText() {
        TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null);
        StepVerifier.create(client.extractKeyPhrases(""))
            .assertNext(response -> validateErrorDocument(expectedError, response.getError()))
            .verifyComplete();
    }

    @Test
    public void extractKeyPhrasesForFaultyText() {
        // NOTE(review): literal truncated by extraction; reproduced verbatim.
        StepVerifier.create(client.extractKeyPhrases("!@ .assertNext(response -> assertEquals(response.getKeyPhrases().size(), 0)) .verifyComplete(); }

    @Test
    public void extractKeyPhrasesForBatchInput() {
        extractBatchKeyPhrasesRunner((inputs) -> {
            StepVerifier.create(client.extractBatchKeyPhrases(inputs))
                .assertNext(response -> validateBatchResult(response, getExpectedBatchKeyPhrases(), TestEndpoint.KEY_PHRASES))
                .verifyComplete();
        });
    }

    @Test
    public void extractKeyPhrasesForBatchInputShowStatistics() {
        extractBatchKeyPhrasesShowStatsRunner((inputs, options) -> {
            StepVerifier.create(client.extractBatchKeyPhrasesWithResponse(inputs, options))
                .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchKeyPhrases(), TestEndpoint.KEY_PHRASES))
                .verifyComplete();
        });
    }

    @Test
    public void extractKeyPhrasesForBatchStringInput() {
        extractKeyPhrasesStringInputRunner((inputs) -> {
            StepVerifier.create(client.extractKeyPhrases(inputs))
                .assertNext(response -> validateBatchResult(response, getExpectedBatchKeyPhrases(), TestEndpoint.KEY_PHRASES))
                .verifyComplete();
        });
    }

    @Test
    public void extractKeyPhrasesForListLanguageHint() {
        extractKeyPhrasesLanguageHintRunner((inputs, language) -> {
            StepVerifier.create(client.extractKeyPhrasesWithResponse(inputs, language))
                .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchKeyPhrases(), TestEndpoint.KEY_PHRASES))
                .verifyComplete();
        });
    }

    /**
     * Test analyzing sentiment for a string input.
     */
    @Test
    public void analyseSentimentForTextInput() {
        final TextSentiment expectedDocumentSentiment = new TextSentiment(TextSentimentClass.MIXED, 0.1, 0.5, 0.4, 66, 0);
        final List<TextSentiment> expectedSentenceSentiments = Arrays.asList(
            new TextSentiment(TextSentimentClass.NEGATIVE, 0.99, 0.005, 0.005, 31, 0),
            new TextSentiment(TextSentimentClass.POSITIVE, 0.005, 0.005, 0.99, 35, 32));
        StepVerifier
            .create(client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi."))
            .assertNext(response -> {
                validateAnalysedSentiment(expectedDocumentSentiment, response.getDocumentSentiment());
                validateAnalysedSentenceSentiment(expectedSentenceSentiments, response.getSentenceSentiments());
            }).verifyComplete();
    }

    /**
     * Verifies that an error document is returned for a empty text input.
     */
    @Test
    public void analyseSentimentForEmptyText() {
        TextAnalyticsError expectedError = new TextAnalyticsError(ErrorCodeValue.INVALID_ARGUMENT, "Invalid document in request.", null, null);
        StepVerifier.create(client.analyzeSentiment(""))
            .assertNext(response -> validateErrorDocument(expectedError, response.getError())).verifyComplete();
    }

    // NOTE(review): no @Test annotation and empty body — placeholder pending implementation.
    public void analyseSentimentForFaultyText() {
    }

    /**
     * Test analyzing sentiment for a list of string input.
     */
    @Test
    public void analyseSentimentForBatchStringInput() {
        analyseSentimentStringInputRunner(inputs ->
            StepVerifier.create(client.analyzeSentiment(inputs))
                .assertNext(response -> validateBatchResult(response, getExpectedBatchTextSentiment(), TestEndpoint.SENTIMENT))
                .verifyComplete());
    }

    /**
     * Test analyzing sentiment for a list of string input with language hint.
     */
    @Test
    public void analyseSentimentForListLanguageHint() {
        analyseSentimentLanguageHintRunner((inputs, language) ->
            StepVerifier.create(client.analyzeSentimentWithResponse(inputs, language))
                .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchTextSentiment(), TestEndpoint.SENTIMENT))
                .verifyComplete());
    }

    /**
     * Test analyzing sentiment for batch input.
     */
    @Test
    public void analyseSentimentForBatchInput() {
        analyseBatchSentimentRunner(inputs ->
            StepVerifier.create(client.analyzeBatchSentiment(inputs))
                .assertNext(response -> validateBatchResult(response, getExpectedBatchTextSentiment(), TestEndpoint.SENTIMENT))
                .verifyComplete());
    }

    /**
     * Verify that we can get statistics on the collection result when given a batch input with options.
     */
    @Test
    public void analyseSentimentForBatchInputShowStatistics() {
        analyseBatchSentimentShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.analyzeBatchSentimentWithResponse(inputs, options))
                .assertNext(response -> validateBatchResult(response.getValue(), getExpectedBatchTextSentiment(), TestEndpoint.SENTIMENT))
                .verifyComplete());
    }
}
`%n` should be used at the end of each `printf` format string so every message ends with a platform-appropriate line separator; as written, consecutive messages run together on one line.
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey(Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_SUBSCRIPTION_KEY")) .endpoint(Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_ENDPOINT")) .buildClient(); List<TextDocumentInput> inputs = Arrays.asList( new TextDocumentInput("1", "Old Faithful is a geyser at Yellowstone Park.", "en"), new TextDocumentInput("2", "Mount Shasta has lenticular clouds.", "en") ); final TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setShowStatistics(true); final DocumentResultCollection<LinkedEntityResult> detectedBatchResult = client.recognizeBatchLinkedEntitiesWithResponse(inputs, requestOptions, Context.NONE).getValue(); System.out.printf("Model version: %s", detectedBatchResult.getModelVersion()); final TextBatchStatistics batchStatistics = detectedBatchResult.getStatistics(); System.out.printf("A batch of document statistics, document count: %s, erroneous document count: %s, transaction count: %s, valid document count: %s", batchStatistics.getDocumentCount(), batchStatistics.getErroneousDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); detectedBatchResult.forEach(linkedEntityDocumentResult -> linkedEntityDocumentResult.getLinkedEntities().stream().forEach(linkedEntity -> System.out.printf("Recognized Linked NamedEntity: %s, URL: %s, Data Source: %s", linkedEntity.getName(), linkedEntity.getUri(), linkedEntity.getDataSource()))); }
System.out.printf("Recognized Linked NamedEntity: %s, URL: %s, Data Source: %s",
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("subscription-key") .endpoint("https: .buildClient(); List<TextDocumentInput> inputs = Arrays.asList( new TextDocumentInput("1", "Old Faithful is a geyser at Yellowstone Park.", "en"), new TextDocumentInput("2", "Mount Shasta has lenticular clouds.", "en") ); final TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setShowStatistics(true); final DocumentResultCollection<RecognizeLinkedEntitiesResult> detectedBatchResult = client.recognizeBatchLinkedEntitiesWithResponse(inputs, requestOptions, Context.NONE).getValue(); System.out.printf("Model version: %s%n", detectedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = detectedBatchResult.getStatistics(); System.out.printf("A batch of document statistics, document count: %s, erroneous document count: %s, transaction count: %s, valid document count: %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getErroneousDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (RecognizeLinkedEntitiesResult linkedEntityDocumentResult : detectedBatchResult) { for (LinkedEntity linkedEntity : linkedEntityDocumentResult.getLinkedEntities()) { System.out.printf("Recognized Linked NamedEntity: %s, URL: %s, Data Source: %s%n", linkedEntity.getName(), linkedEntity.getUrl(), linkedEntity.getDataSource()); } } }
class RecognizeLinkedEntitiesBatchDocuments { }
class RecognizeLinkedEntitiesBatchDocuments { /** * Main method to invoke this demo about how to recognize linked entities of a batch of text inputs. * * @param args Unused arguments to the program. */ }
Same comment as on the other samples in this PR: replace the nested `forEach` lambdas with enhanced `for` loops for readability, and terminate each `printf` format string with `%n`.
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey(Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_SUBSCRIPTION_KEY")) .endpoint(Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_ENDPOINT")) .buildClient(); List<TextDocumentInput> inputs = Arrays.asList( new TextDocumentInput("1", "Satya Nadella is the CEO of Microsoft", "en"), new TextDocumentInput("2", "Elon Musk is the CEO of SpaceX and Tesla.", "en") ); final TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setShowStatistics(true); final DocumentResultCollection<NamedEntityResult> detectedBatchResult = client.recognizeBatchEntitiesWithResponse(inputs, requestOptions, Context.NONE).getValue(); System.out.printf("Model version: %s", detectedBatchResult.getModelVersion()); final TextBatchStatistics batchStatistics = detectedBatchResult.getStatistics(); System.out.printf("A batch of document statistics, document count: %s, erroneous document count: %s, transaction count: %s, valid document count: %s", batchStatistics.getDocumentCount(), batchStatistics.getErroneousDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); detectedBatchResult.forEach(detectedEntityResult -> detectedEntityResult.getNamedEntities().forEach(entity -> System.out.printf("Recognized NamedEntity: %s, NamedEntity Type: %s, NamedEntity Subtype: %s, Offset: %s, Length: %s, Score: %s", entity.getText(), entity.getType(), entity.getSubtype(), entity.getOffset(), entity.getLength(), entity.getScore()))); }
detectedBatchResult.forEach(detectedEntityResult ->
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("subscription-key") .endpoint("https: .buildClient(); List<TextDocumentInput> inputs = Arrays.asList( new TextDocumentInput("1", "Satya Nadella is the CEO of Microsoft.", "en"), new TextDocumentInput("2", "Elon Musk is the CEO of SpaceX and Tesla.", "en") ); final TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setShowStatistics(true); final DocumentResultCollection<RecognizeEntitiesResult> detectedBatchResult = client.recognizeBatchEntitiesWithResponse(inputs, requestOptions, Context.NONE).getValue(); System.out.printf("Model version: %s%n", detectedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = detectedBatchResult.getStatistics(); System.out.printf("A batch of document statistics, document count: %s, erroneous document count: %s, transaction count: %s, valid document count: %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getErroneousDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (RecognizeEntitiesResult recognizeEntitiesResult : detectedBatchResult) { for (NamedEntity entity : recognizeEntitiesResult.getNamedEntities()) { System.out.printf("Recognized NamedEntity: %s, NamedEntity Type: %s, NamedEntity Subtype: %s, Offset: %s, Length: %s, Score: %s.%n", entity.getText(), entity.getType(), entity.getSubtype(), entity.getOffset(), entity.getLength(), entity.getScore()); } } }
class RecognizeEntitiesBatchDocuments { }
class RecognizeEntitiesBatchDocuments { /** * Main method to invoke this demo about how to recognize entities of a batch of text inputs. * * @param args Unused arguments to the program. */ }
add `%n`
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey(Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_SUBSCRIPTION_KEY")) .endpoint(Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_ENDPOINT")) .buildClient(); String text = "Satya Nadella is the CEO of Microsoft"; client.recognizeEntities(text).getNamedEntities().forEach( entity -> System.out.printf( "Recognized NamedEntity: %s, NamedEntity Type: %s, NamedEntity Subtype: %s, Offset: %s, Length: %s, Score: %s", entity.getText(), entity.getType(), entity.getSubtype(), entity.getOffset(), entity.getLength(), entity.getScore())); }
"Recognized NamedEntity: %s, NamedEntity Type: %s, NamedEntity Subtype: %s, Offset: %s, Length: %s, Score: %s",
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("subscription-key") .endpoint("https: .buildClient(); String text = "Satya Nadella is the CEO of Microsoft"; for (NamedEntity entity : client.recognizeEntities(text).getNamedEntities()) { System.out.printf( "Recognized NamedEntity: %s, NamedEntity Type: %s, NamedEntity Subtype: %s, Offset: %s, Length: %s, Score: %s.%n", entity.getText(), entity.getType(), entity.getSubtype(), entity.getOffset(), entity.getLength(), entity.getScore()); } }
class RecognizeEntities { }
class RecognizeEntities { /** * Main method to invoke this demo about how to recognize entities of a text input. * * @param args Unused arguments to the program. */ }
add `%n`
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey(Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_SUBSCRIPTION_KEY")) .endpoint(Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_ENDPOINT")) .buildClient(); String text = "The hotel was dark and unclean."; final TextSentimentResult sentimentResult = client.analyzeSentiment(text); final TextSentiment documentSentiment = sentimentResult.getDocumentSentiment(); System.out.printf( "Recognized TextSentiment: %s, Positive Score: %s, Neutral Score: %s, Negative Score: %s.", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore()); final List<TextSentiment> sentiments = sentimentResult.getSentenceSentiments(); sentiments.forEach(textSentiment -> System.out.printf( "Recognized Sentence TextSentiment: %s, Positive Score: %s, Neutral Score: %s, Negative Score: %s.", textSentiment.getTextSentimentClass(), textSentiment.getPositiveScore(), textSentiment.getNeutralScore(), textSentiment.getNegativeScore())); }
"Recognized TextSentiment: %s, Positive Score: %s, Neutral Score: %s, Negative Score: %s.",
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("subscription-key") .endpoint("https: .buildClient(); String text = "The hotel was dark and unclean."; final AnalyzeSentimentResult sentimentResult = client.analyzeSentiment(text); final TextSentiment documentSentiment = sentimentResult.getDocumentSentiment(); System.out.printf( "Recognized TextSentiment: %s, Positive Score: %s, Neutral Score: %s, Negative Score: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore()); final List<TextSentiment> sentiments = sentimentResult.getSentenceSentiments(); for (TextSentiment textSentiment : sentiments) { System.out.printf( "Recognized Sentence TextSentiment: %s, Positive Score: %s, Neutral Score: %s, Negative Score: %s.%n", textSentiment.getTextSentimentClass(), textSentiment.getPositiveScore(), textSentiment.getNeutralScore(), textSentiment.getNegativeScore()); } }
class AnalyzeSentiment { }
class AnalyzeSentiment { /** * Main method to invoke this demo about how to analyze sentiment of a text input. * * @param args Unused arguments to the program. */ }
add `%n`
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey(Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_SUBSCRIPTION_KEY")) .endpoint(Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_ENDPOINT")) .buildClient(); String text = "The hotel was dark and unclean."; final TextSentimentResult sentimentResult = client.analyzeSentiment(text); final TextSentiment documentSentiment = sentimentResult.getDocumentSentiment(); System.out.printf( "Recognized TextSentiment: %s, Positive Score: %s, Neutral Score: %s, Negative Score: %s.", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore()); final List<TextSentiment> sentiments = sentimentResult.getSentenceSentiments(); sentiments.forEach(textSentiment -> System.out.printf( "Recognized Sentence TextSentiment: %s, Positive Score: %s, Neutral Score: %s, Negative Score: %s.", textSentiment.getTextSentimentClass(), textSentiment.getPositiveScore(), textSentiment.getNeutralScore(), textSentiment.getNegativeScore())); }
"Recognized Sentence TextSentiment: %s, Positive Score: %s, Neutral Score: %s, Negative Score: %s.",
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("subscription-key") .endpoint("https: .buildClient(); String text = "The hotel was dark and unclean."; final AnalyzeSentimentResult sentimentResult = client.analyzeSentiment(text); final TextSentiment documentSentiment = sentimentResult.getDocumentSentiment(); System.out.printf( "Recognized TextSentiment: %s, Positive Score: %s, Neutral Score: %s, Negative Score: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore()); final List<TextSentiment> sentiments = sentimentResult.getSentenceSentiments(); for (TextSentiment textSentiment : sentiments) { System.out.printf( "Recognized Sentence TextSentiment: %s, Positive Score: %s, Neutral Score: %s, Negative Score: %s.%n", textSentiment.getTextSentimentClass(), textSentiment.getPositiveScore(), textSentiment.getNeutralScore(), textSentiment.getNegativeScore()); } }
class AnalyzeSentiment { }
class AnalyzeSentiment { /** * Main method to invoke this demo about how to analyze sentiment of a text input. * * @param args Unused arguments to the program. */ }
This should be cleaned up in your samples, since you have the invocation of .subscriptionKey and .endpoint twice.
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey(Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_SUBSCRIPTION_KEY")) .endpoint(Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_ENDPOINT")) .buildClient(); String text = "hello world"; final DetectLanguageResult detectLanguageResult = client.detectLanguage(text, "US"); final DetectedLanguage detectedDocumentLanguage = detectLanguageResult.getPrimaryLanguage(); System.out.printf("Detected Primary Language: %s, ISO 6391 Name: %s, Score: %s%n", detectedDocumentLanguage.getName(), detectedDocumentLanguage.getIso6391Name(), detectedDocumentLanguage.getScore()); final List<DetectedLanguage> detectedLanguages = detectLanguageResult.getDetectedLanguages(); detectedLanguages.forEach(detectedLanguage -> System.out.printf("Other detected languages: %s, ISO 6391 Name: %s, Score: %s%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore())); }
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("subscription-key") .endpoint("https: .buildClient(); String text = "hello world"; final DetectLanguageResult detectLanguageResult = client.detectLanguage(text, "US"); final DetectedLanguage detectedDocumentLanguage = detectLanguageResult.getPrimaryLanguage(); System.out.printf("Detected Primary Language: %s, ISO 6391 Name: %s, Score: %s.%n", detectedDocumentLanguage.getName(), detectedDocumentLanguage.getIso6391Name(), detectedDocumentLanguage.getScore()); final List<DetectedLanguage> detectedLanguages = detectLanguageResult.getDetectedLanguages(); for (DetectedLanguage detectedLanguage : detectedLanguages) { System.out.printf("Other detected languages: %s, ISO 6391 Name: %s, Score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } }
class HelloWorld { }
class HelloWorld { /** * Main method to invoke this demo about how to detect language of a text input. * * @param args Unused arguments to the program. */ }
I don't think you need a local variable declaration for this. ``` for (DetectedLanguage language : detectLanguageResult.getDetectedLanguages()) { ... ``` or ``` detectLanguageResult.getDetectedLanguages().forEach(language -> { }); ```
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey(Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_SUBSCRIPTION_KEY")) .endpoint(Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_ENDPOINT")) .buildClient(); String text = "hello world"; final DetectLanguageResult detectLanguageResult = client.detectLanguage(text, "US"); final DetectedLanguage detectedDocumentLanguage = detectLanguageResult.getPrimaryLanguage(); System.out.printf("Detected Primary Language: %s, ISO 6391 Name: %s, Score: %s%n", detectedDocumentLanguage.getName(), detectedDocumentLanguage.getIso6391Name(), detectedDocumentLanguage.getScore()); final List<DetectedLanguage> detectedLanguages = detectLanguageResult.getDetectedLanguages(); detectedLanguages.forEach(detectedLanguage -> System.out.printf("Other detected languages: %s, ISO 6391 Name: %s, Score: %s%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore())); }
final List<DetectedLanguage> detectedLanguages = detectLanguageResult.getDetectedLanguages();
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("subscription-key") .endpoint("https: .buildClient(); String text = "hello world"; final DetectLanguageResult detectLanguageResult = client.detectLanguage(text, "US"); final DetectedLanguage detectedDocumentLanguage = detectLanguageResult.getPrimaryLanguage(); System.out.printf("Detected Primary Language: %s, ISO 6391 Name: %s, Score: %s.%n", detectedDocumentLanguage.getName(), detectedDocumentLanguage.getIso6391Name(), detectedDocumentLanguage.getScore()); final List<DetectedLanguage> detectedLanguages = detectLanguageResult.getDetectedLanguages(); for (DetectedLanguage detectedLanguage : detectedLanguages) { System.out.printf("Other detected languages: %s, ISO 6391 Name: %s, Score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } }
class HelloWorld { }
class HelloWorld { /** * Main method to invoke this demo about how to detect language of a text input. * * @param args Unused arguments to the program. */ }
Wrap this with try-catch and return Mono.error() if there are any exceptions. For reference, see other async clients.
public Mono<DetectLanguageResult> detectLanguage(String text) { return detectLanguageWithResponse(text, defaultCountryHint).flatMap(FluxUtil::toMono); }
return detectLanguageWithResponse(text, defaultCountryHint).flatMap(FluxUtil::toMono);
public Mono<DetectLanguageResult> detectLanguage(String text) { try { return detectLanguageWithResponse(text, defaultCountryHint).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } }
class TextAnalyticsAsyncClient { private final ClientLogger logger = new ClientLogger(TextAnalyticsAsyncClient.class); private final TextAnalyticsClientImpl service; private final TextAnalyticsServiceVersion serviceVersion; private final String defaultCountryHint; private final String defaultLanguage; /** * Create a {@code TextAnalyticsAsyncClient} that sends requests to the Text Analytics services's endpoint. * Each service call goes through the {@link TextAnalyticsClientBuilder * * @param service The proxy service used to perform REST calls. * @param serviceVersion The versions of Azure Text Analytics supported by this client library. * @param clientOptions The {@link TextAnalyticsClientOptions client option} contains * {@link TextAnalyticsClientOptions * {@link TextAnalyticsClientOptions
class TextAnalyticsAsyncClient { private final ClientLogger logger = new ClientLogger(TextAnalyticsAsyncClient.class); private final TextAnalyticsClientImpl service; private final TextAnalyticsServiceVersion serviceVersion; private final String defaultCountryHint; private final String defaultLanguage; /** * Create a {@code TextAnalyticsAsyncClient} that sends requests to the Text Analytics services's endpoint. Each * service call goes through the {@link TextAnalyticsClientBuilder * * @param service The proxy service used to perform REST calls. * @param serviceVersion The versions of Azure Text Analytics supported by this client library. * @param clientOptions The {@link TextAnalyticsClientOptions client option} contains * {@link TextAnalyticsClientOptions * {@link TextAnalyticsClientOptions
Could we move these getters up with `getServiceVersion`? It's easier to find getters if they are grouped together.
public String getDefaultLanguage() { return defaultLanguage; }
}
public String getDefaultLanguage() { return defaultLanguage; }
class (Positive, Negative, and * Neutral) for the document and each sentence within it. * * @param textInputs A list of {@link TextDocumentInput inputs/documents}
class TextAnalyticsAsyncClient { private final ClientLogger logger = new ClientLogger(TextAnalyticsAsyncClient.class); private final TextAnalyticsClientImpl service; private final TextAnalyticsServiceVersion serviceVersion; private final String defaultCountryHint; private final String defaultLanguage; /** * Create a {@code TextAnalyticsAsyncClient} that sends requests to the Text Analytics services's endpoint. Each * service call goes through the {@link TextAnalyticsClientBuilder * * @param service The proxy service used to perform REST calls. * @param serviceVersion The versions of Azure Text Analytics supported by this client library. * @param clientOptions The {@link TextAnalyticsClientOptions client option} contains * {@link TextAnalyticsClientOptions * {@link TextAnalyticsClientOptions
This would only happen if the service returned an invalid Sentiment type, correct? Is this a good spot to throw a RuntimeException if the service failed? Maybe skip the result, log a warning, and continue processing the other responses.
private AnalyzeSentimentResult convertToTextSentimentResult(final DocumentSentiment documentSentiment) { final TextSentimentClass documentSentimentClass = TextSentimentClass.fromString(documentSentiment. getSentiment().toString()); if (documentSentimentClass == null) { throw logger.logExceptionAsWarning( new RuntimeException(String.format("'%s' is not valid text sentiment.", documentSentiment.getSentiment()))); } final SentimentConfidenceScorePerLabel confidenceScorePerLabel = documentSentiment.getDocumentScores(); final List<TextSentiment> sentenceSentimentTexts = documentSentiment.getSentences().stream() .map(sentenceSentiment -> { TextSentimentClass sentimentClass = TextSentimentClass.fromString(sentenceSentiment .getSentiment().toString()); if (sentimentClass == null) { throw logger.logExceptionAsWarning( new RuntimeException(String.format("'%s' is not valid text sentiment.", sentenceSentiment.getSentiment()))); } SentimentConfidenceScorePerLabel confidenceScorePerSentence = sentenceSentiment.getSentenceScores(); return new TextSentiment(sentimentClass, confidenceScorePerSentence.getNegative(), confidenceScorePerSentence.getNeutral(), confidenceScorePerSentence.getPositive(), sentenceSentiment.getLength(), sentenceSentiment.getOffset()); }).collect(Collectors.toList()); return new AnalyzeSentimentResult(documentSentiment.getId(), documentSentiment.getStatistics() == null ? null : convertToTextDocumentStatistics(documentSentiment.getStatistics()), null, new TextSentiment(documentSentimentClass, confidenceScorePerLabel.getNegative(), confidenceScorePerLabel.getNeutral(), confidenceScorePerLabel.getPositive(), sentenceSentimentTexts.stream().mapToInt(TextSentiment::getLength).sum(), 0), sentenceSentimentTexts); }
if (documentSentimentClass == null) {
private AnalyzeSentimentResult convertToTextSentimentResult(final DocumentSentiment documentSentiment) { final TextSentimentClass documentSentimentClass = TextSentimentClass.fromString(documentSentiment. getSentiment().toString()); if (documentSentimentClass == null) { logger.logExceptionAsWarning( new RuntimeException(String.format("'%s' is not valid text sentiment.", documentSentiment.getSentiment()))); } final SentimentConfidenceScorePerLabel confidenceScorePerLabel = documentSentiment.getDocumentScores(); final List<TextSentiment> sentenceSentimentTexts = documentSentiment.getSentences().stream() .map(sentenceSentiment -> { TextSentimentClass sentimentClass = TextSentimentClass.fromString(sentenceSentiment .getSentiment().toString()); if (sentimentClass == null) { logger.logExceptionAsWarning( new RuntimeException(String.format("'%s' is not valid text sentiment.", sentenceSentiment.getSentiment()))); } SentimentConfidenceScorePerLabel confidenceScorePerSentence = sentenceSentiment.getSentenceScores(); return new TextSentiment(sentimentClass, confidenceScorePerSentence.getNegative(), confidenceScorePerSentence.getNeutral(), confidenceScorePerSentence.getPositive(), sentenceSentiment.getLength(), sentenceSentiment.getOffset()); }).collect(Collectors.toList()); return new AnalyzeSentimentResult(documentSentiment.getId(), documentSentiment.getStatistics() == null ? null : convertToTextDocumentStatistics(documentSentiment.getStatistics()), null, new TextSentiment(documentSentimentClass, confidenceScorePerLabel.getNegative(), confidenceScorePerLabel.getNeutral(), confidenceScorePerLabel.getPositive(), sentenceSentimentTexts.stream().mapToInt(TextSentiment::getLength).sum(), 0), sentenceSentimentTexts); }
class (Positive, Negative, and * Neutral) for the document and each sentence within it. * * @param textInputs A list of {@link TextDocumentInput inputs/documents}
class (Positive, Negative, and * Neutral) for the document and each sentence within it. * * @param textInputs A list of {@link TextDocumentInput inputs/documents}
Same as my other comment around this being a service issue.
private AnalyzeSentimentResult convertToTextSentimentResult(final DocumentSentiment documentSentiment) { final TextSentimentClass documentSentimentClass = TextSentimentClass.fromString(documentSentiment. getSentiment().toString()); if (documentSentimentClass == null) { throw logger.logExceptionAsWarning( new RuntimeException(String.format("'%s' is not valid text sentiment.", documentSentiment.getSentiment()))); } final SentimentConfidenceScorePerLabel confidenceScorePerLabel = documentSentiment.getDocumentScores(); final List<TextSentiment> sentenceSentimentTexts = documentSentiment.getSentences().stream() .map(sentenceSentiment -> { TextSentimentClass sentimentClass = TextSentimentClass.fromString(sentenceSentiment .getSentiment().toString()); if (sentimentClass == null) { throw logger.logExceptionAsWarning( new RuntimeException(String.format("'%s' is not valid text sentiment.", sentenceSentiment.getSentiment()))); } SentimentConfidenceScorePerLabel confidenceScorePerSentence = sentenceSentiment.getSentenceScores(); return new TextSentiment(sentimentClass, confidenceScorePerSentence.getNegative(), confidenceScorePerSentence.getNeutral(), confidenceScorePerSentence.getPositive(), sentenceSentiment.getLength(), sentenceSentiment.getOffset()); }).collect(Collectors.toList()); return new AnalyzeSentimentResult(documentSentiment.getId(), documentSentiment.getStatistics() == null ? null : convertToTextDocumentStatistics(documentSentiment.getStatistics()), null, new TextSentiment(documentSentimentClass, confidenceScorePerLabel.getNegative(), confidenceScorePerLabel.getNeutral(), confidenceScorePerLabel.getPositive(), sentenceSentimentTexts.stream().mapToInt(TextSentiment::getLength).sum(), 0), sentenceSentimentTexts); }
throw logger.logExceptionAsWarning(
private AnalyzeSentimentResult convertToTextSentimentResult(final DocumentSentiment documentSentiment) { final TextSentimentClass documentSentimentClass = TextSentimentClass.fromString(documentSentiment. getSentiment().toString()); if (documentSentimentClass == null) { logger.logExceptionAsWarning( new RuntimeException(String.format("'%s' is not valid text sentiment.", documentSentiment.getSentiment()))); } final SentimentConfidenceScorePerLabel confidenceScorePerLabel = documentSentiment.getDocumentScores(); final List<TextSentiment> sentenceSentimentTexts = documentSentiment.getSentences().stream() .map(sentenceSentiment -> { TextSentimentClass sentimentClass = TextSentimentClass.fromString(sentenceSentiment .getSentiment().toString()); if (sentimentClass == null) { logger.logExceptionAsWarning( new RuntimeException(String.format("'%s' is not valid text sentiment.", sentenceSentiment.getSentiment()))); } SentimentConfidenceScorePerLabel confidenceScorePerSentence = sentenceSentiment.getSentenceScores(); return new TextSentiment(sentimentClass, confidenceScorePerSentence.getNegative(), confidenceScorePerSentence.getNeutral(), confidenceScorePerSentence.getPositive(), sentenceSentiment.getLength(), sentenceSentiment.getOffset()); }).collect(Collectors.toList()); return new AnalyzeSentimentResult(documentSentiment.getId(), documentSentiment.getStatistics() == null ? null : convertToTextDocumentStatistics(documentSentiment.getStatistics()), null, new TextSentiment(documentSentimentClass, confidenceScorePerLabel.getNegative(), confidenceScorePerLabel.getNeutral(), confidenceScorePerLabel.getPositive(), sentenceSentimentTexts.stream().mapToInt(TextSentiment::getLength).sum(), 0), sentenceSentimentTexts); }
class (Positive, Negative, and * Neutral) for the document and each sentence within it. * * @param textInputs A list of {@link TextDocumentInput inputs/documents}
class (Positive, Negative, and * Neutral) for the document and each sentence within it. * * @param textInputs A list of {@link TextDocumentInput inputs/documents}
retryAfterHeader: This could be null when the user does not want to use any response header for retry.
private Duration determineDelayDuration(HttpResponse response, int tryCount) { int code = response.getStatusCode(); if (code != 429 && code != 503) { return retryStrategy.calculateRetryDelay(tryCount); } String retryHeaderValue = null; if (!isNullOrEmpty(retryAfterHeader)) { retryHeaderValue = response.getHeaderValue(retryAfterHeader); } if (isNullOrEmpty(retryHeaderValue)) { return retryStrategy.calculateRetryDelay(tryCount); } return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit); }
retryHeaderValue = response.getHeaderValue(retryAfterHeader);
private Duration determineDelayDuration(HttpResponse response, int tryCount) { int code = response.getStatusCode(); if (code != 429 && code != 503) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } String retryHeaderValue = null; if (!isNullOrEmpty(retryPolicyOptions.getRetryAfterHeader())) { retryHeaderValue = response.getHeaderValue(retryPolicyOptions.getRetryAfterHeader()); } if (isNullOrEmpty(retryHeaderValue)) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } return Duration.of(Integer.parseInt(retryHeaderValue), retryPolicyOptions.getRetryAfterTimeUnit()); }
class RetryPolicy implements HttpPipelinePolicy { private final ClientLogger logger = new ClientLogger(RetryPolicy.class); private final RetryStrategy retryStrategy; private final String retryAfterHeader; private final ChronoUnit retryAfterTimeUnit; /** * Creates a default {@link ExponentialBackoff} retry policy. */ public RetryPolicy() { this(new ExponentialBackoff(), null, null); } /** * Creates a default {@link ExponentialBackoff} retry policy. * @param retryAfterHeader The retry after http header name to be used get retry after value from * @param retryAfterTimeUnit The time unit to use while applying retry based on value specified in * * {@code retryAfterHeader} in {@link HttpResponse}. * {@link HttpResponse}. */ public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) { this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit); } /** * Creates a RetryPolicy with the provided {@link RetryStrategy}. * * @param retryStrategy The {@link RetryStrategy} used for retries. * @throws NullPointerException if {@code retryStrategy} is {@code null}. */ public RetryPolicy(RetryStrategy retryStrategy) { this(retryStrategy, null, null); } /** * Creates a {@link RetryPolicy} with the provided {@link RetryStrategy} and {@code retryAfterHeader}. * * @param retryStrategy The {@link RetryStrategy} used for retries. * @param retryAfterHeader The retry after http header name to be used get retry after value from * {@link HttpResponse}. * @param retryAfterTimeUnit The time unit to use while applying retry based on value specified in * {@code retryAfterHeader} in {@link HttpResponse}. * @throws NullPointerException if {@code retryStrategy} is {@code null}. 
*/ public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) { this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null."); this.retryAfterHeader = retryAfterHeader; this.retryAfterTimeUnit = retryAfterTimeUnit; if (!isNullOrEmpty(retryAfterHeader)) { Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null."); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { return attemptAsync(context, next, context.getHttpRequest(), 0); } private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next, final HttpRequest originalHttpRequest, final int tryCount) { context.setHttpRequest(originalHttpRequest.copy()); return next.clone().process() .flatMap(httpResponse -> { if (shouldRetry(httpResponse, tryCount)) { final Duration delayDuration = determineDelayDuration(httpResponse, tryCount); logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount, delayDuration.getSeconds()); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(delayDuration); } else { return Mono.just(httpResponse); } }) .onErrorResume(err -> { int maxRetries = retryStrategy.getMaxRetries(); if (tryCount < maxRetries) { logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(retryStrategy.calculateRetryDelay(tryCount)); } else { return Mono.error(new RuntimeException( String.format("Max retries %d times exceeded. Error Details: %s", maxRetries, err.getMessage()), err)); } }); } private boolean shouldRetry(HttpResponse response, int tryCount) { return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response); } /** * Determines the delay duration that should be waited before retrying. 
* @param response HTTP response * @return If the HTTP response has a retry-after-ms header that will be returned, * otherwise the duration used during the construction of the policy. */ }
class RetryPolicy implements HttpPipelinePolicy { private final ClientLogger logger = new ClientLogger(RetryPolicy.class); private final RetryPolicyOptions retryPolicyOptions; /** * Creates {@link RetryPolicy} with default {@link ExponentialBackoff} as {@link RetryStrategy}and use * 'retry-after-ms' in {@link HttpResponse} header for calculating retry delay. */ public RetryPolicy() { this(new RetryPolicyOptions(new ExponentialBackoff())); } /** * Creates a RetryPolicy with the provided {@link RetryStrategy}. * * @param retryStrategy The {@link RetryStrategy} used for retries. */ public RetryPolicy(RetryStrategy retryStrategy) { Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null"); this.retryPolicyOptions = new RetryPolicyOptions(retryStrategy); } /** * Creates a {@link RetryPolicy} with the provided {@link RetryPolicyOptions}. * * @param retryPolicyOptions with given {@link RetryPolicyOptions}. * @throws NullPointerException if {@code retryPolicyOptions} or {@code retryPolicyOptions getRetryStrategy } * is {@code null}. 
*/ public RetryPolicy(RetryPolicyOptions retryPolicyOptions) { this.retryPolicyOptions = Objects.requireNonNull(retryPolicyOptions, "'retryPolicyOptions' cannot be null."); Objects.requireNonNull(retryPolicyOptions.getRetryStrategy(), "'retryPolicyOptions.retryStrategy' cannot be null."); } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { return attemptAsync(context, next, context.getHttpRequest(), 0); } private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next, final HttpRequest originalHttpRequest, final int tryCount) { context.setHttpRequest(originalHttpRequest.copy()); return next.clone().process() .flatMap(httpResponse -> { if (shouldRetry(httpResponse, tryCount)) { final Duration delayDuration = determineDelayDuration(httpResponse, tryCount); logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount, delayDuration.getSeconds()); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(delayDuration); } else { return Mono.just(httpResponse); } }) .onErrorResume(err -> { int maxRetries = retryPolicyOptions.getRetryStrategy().getMaxRetries(); if (tryCount < maxRetries) { logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount)); } else { return Mono.error(new RuntimeException( String.format("Max retries %d times exceeded. Error Details: %s", maxRetries, err.getMessage()), err)); } }); } private boolean shouldRetry(HttpResponse response, int tryCount) { return tryCount < retryPolicyOptions.getRetryStrategy().getMaxRetries() && retryPolicyOptions.getRetryStrategy().shouldRetry(response); } /** * Determines the delay duration that should be waited before retrying. 
* @param response HTTP response * @return If the HTTP response has a retry-after-ms header that will be returned, * otherwise the duration used during the construction of the policy. */ }
Let's just instantiate with `this.retryStrategy = new ExponentialBackoff()`
/**
 * Creates default retry options: {@link ExponentialBackoff} as the {@link RetryStrategy} and no
 * retry-delay header lookup (the strategy alone computes delays).
 */
public RetryPolicyOptions() {
    setRetryOptions(new ExponentialBackoff(), null, null);
}
setRetryOptions(new ExponentialBackoff(), null, null);
/**
 * Creates default retry options: {@link ExponentialBackoff} as the {@link RetryStrategy} and no
 * retry-delay header lookup (the strategy alone computes delays).
 */
public RetryPolicyOptions() {
    this(new ExponentialBackoff(), null, null);
}
class RetryPolicyOptions { private RetryStrategy retryStrategy; private String retryAfterHeader; private ChronoUnit retryAfterTimeUnit; /** * Creates a default {@link ExponentialBackoff} for retry policy. It will not use any {@code retryAfterHeader} * in {@link HttpResponse}. */ /** * Sets RetryPolicyOptions with the provided {@link RetryStrategy}. It will not use any * {@code retryAfterHeader} in {@link HttpResponse}. * * @param retryStrategy The {@link RetryStrategy} used for retries. * @throws NullPointerException if {@code retryStrategy} is {@code null}. */ public RetryPolicyOptions setRetryStrategy(RetryStrategy retryStrategy) { return setRetryOptions(retryStrategy, null, null); } /** * Sets default {@link ExponentialBackoff} retry policy along with provided {@code retryAfterHeader} and * {@code retryAfterTimeUnit}. * * @param retryAfterHeader The 'retry-after' HTTP header name to lookup for the retry duration.The value * {@code null} is valid. * @param retryAfterTimeUnit The time unit to use while applying retry based on value specified in * {@code retryAfterHeader} in {@link HttpResponse}.The value {@code null} is valid only in case when * {@code retryAfterHeader} is empty or {@code null}. * @throws NullPointerException Only if {@code retryAfterTimeUnit} is {@code null} and {@code retryAfterHeader} * is not {@code null}. 
*/ public RetryPolicyOptions setRetryAfterHeader(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) { return setRetryOptions(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit); } private RetryPolicyOptions setRetryOptions(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) { this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null."); this.retryAfterHeader = retryAfterHeader; this.retryAfterTimeUnit = retryAfterTimeUnit; if (!isNullOrEmpty(retryAfterHeader)) { Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null."); } return this; } /** * @return {@link RetryStrategy} to be used in this {@link RetryPolicyOptions}. */ public RetryStrategy getRetryStrategy() { return retryStrategy; } /** * @return {@code retryAfterHeader} to be used in this {@link RetryPolicyOptions}. */ public String getRetryAfterHeader() { return retryAfterHeader; } /** * @return {@link ChronoUnit} to be used for {@code retryAfterHeader}. */ public ChronoUnit getRetryAfterTimeUnit() { return retryAfterTimeUnit; } }
/**
 * Immutable configuration consumed by {@link RetryPolicy}: the {@link RetryStrategy} used to
 * decide and delay retries, plus an optional HTTP response header (and its time unit) from which
 * a service-provided retry delay can be read.
 */
class RetryPolicyOptions {
    // Strategy used to decide whether to retry and how long to wait; never null after construction.
    private final RetryStrategy retryStrategy;
    // Optional response header (e.g. 'Retry-After') carrying a service-specified delay; may be null.
    private final String retryAfterHeader;
    // Unit applied to the numeric value read from retryAfterHeader; null only when the header is null/empty.
    private final ChronoUnit retryAfterTimeUnit;

    /**
     * Creates the {@link RetryPolicyOptions} with provided {@link RetryStrategy} that will be used
     * when a request is retried. Retry delay headers in the response are ignored.
     *
     * @param retryStrategy The {@link RetryStrategy} used for retries. It will default to
     * {@link ExponentialBackoff} if the provided value is {@code null}.
     */
    public RetryPolicyOptions(RetryStrategy retryStrategy) {
        this(retryStrategy, null, null);
    }

    /**
     * Creates the {@link RetryPolicyOptions} with provided {@link RetryStrategy},
     * {@code retryAfterHeader} and {@code retryAfterTimeUnit} that will be used when a request is
     * retried.
     *
     * @param retryStrategy The {@link RetryStrategy} used for retries. It will default to
     * {@link ExponentialBackoff} if the provided value is {@code null}.
     * @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to
     * look up for the retry delay. If the value is {@code null}, {@link RetryPolicy} will use the
     * retry strategy to compute the delay and ignore the delay provided in the response header.
     * @param retryAfterTimeUnit The time unit to use when applying the retry delay. {@code null} is
     * valid if, and only if, {@code retryAfterHeader} is {@code null}.
     * @throws NullPointerException When {@code retryAfterTimeUnit} is {@code null} and
     * {@code retryAfterHeader} is not {@code null}.
     */
    public RetryPolicyOptions(RetryStrategy retryStrategy, String retryAfterHeader,
        ChronoUnit retryAfterTimeUnit) {
        // A null strategy is tolerated rather than rejected; fall back to exponential backoff.
        if (Objects.isNull(retryStrategy)) {
            this.retryStrategy = new ExponentialBackoff();
        } else {
            this.retryStrategy = retryStrategy;
        }
        this.retryAfterHeader = retryAfterHeader;
        this.retryAfterTimeUnit = retryAfterTimeUnit;
        // A delay header without a time unit is unusable, so reject that combination eagerly.
        if (!isNullOrEmpty(retryAfterHeader)) {
            Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
        }
    }

    /**
     * @return The {@link RetryStrategy} used when retrying requests.
     */
    public RetryStrategy getRetryStrategy() {
        return retryStrategy;
    }

    /**
     * @return The HTTP header which contains the retry delay returned by the service.
     */
    public String getRetryAfterHeader() {
        return retryAfterHeader;
    }

    /**
     * @return The {@link ChronoUnit} used when applying request retry delays.
     */
    public ChronoUnit getRetryAfterTimeUnit() {
        return retryAfterTimeUnit;
    }
}
This should also check that `retryPolicyOptions.getRetryStrategy()` is non-null. We need that to be set.
/**
 * Creates a {@link RetryPolicy} with the provided {@link RetryPolicyOptions}.
 *
 * @param retryPolicyOptions The options governing retry behavior.
 * @throws NullPointerException if {@code retryPolicyOptions} or
 * {@code retryPolicyOptions.getRetryStrategy()} is {@code null}.
 */
public RetryPolicy(RetryPolicyOptions retryPolicyOptions) {
    this.retryPolicyOptions = Objects.requireNonNull(retryPolicyOptions,
        "'retryPolicyOptions' cannot be null.");
    // Fail fast: the policy dereferences the strategy on every retry decision, so a null
    // strategy must be rejected at construction time rather than surfacing mid-request.
    Objects.requireNonNull(retryPolicyOptions.getRetryStrategy(),
        "'retryPolicyOptions.retryStrategy' cannot be null.");
}
this.retryPolicyOptions = Objects.requireNonNull(retryPolicyOptions,
/**
 * Creates a {@link RetryPolicy} with the provided {@link RetryPolicyOptions}.
 *
 * @param retryPolicyOptions The options governing retry behavior.
 * @throws NullPointerException if {@code retryPolicyOptions} or
 * {@code retryPolicyOptions.getRetryStrategy()} is {@code null}.
 */
public RetryPolicy(RetryPolicyOptions retryPolicyOptions) {
    this.retryPolicyOptions = Objects.requireNonNull(retryPolicyOptions,
        "'retryPolicyOptions' cannot be null.");
    // The strategy is dereferenced on every retry decision; reject a null one up front.
    Objects.requireNonNull(retryPolicyOptions.getRetryStrategy(),
        "'retryPolicyOptions.retryStrategy' cannot be null.");
}
class RetryPolicy implements HttpPipelinePolicy { private final ClientLogger logger = new ClientLogger(RetryPolicy.class); private final RetryPolicyOptions retryPolicyOptions; /** * Creates a default {@link RetryPolicy} with default {@link RetryPolicyOptions}. */ public RetryPolicy() { this(new RetryPolicyOptions()); } /** * Creates a RetryPolicy with the provided {@link RetryStrategy}. * * @param retryStrategy The {@link RetryStrategy} used for retries. */ public RetryPolicy(RetryStrategy retryStrategy) { Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null"); this.retryPolicyOptions = new RetryPolicyOptions().setRetryStrategy(retryStrategy); } /** * Creates a {@link RetryPolicy} with the provided {@link RetryPolicyOptions}. * * @param retryPolicyOptions with given {@link RetryPolicyOptions}. * @throws NullPointerException if {@code retryPolicyOptions} is {@code null}. */ @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { return attemptAsync(context, next, context.getHttpRequest(), 0); } private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next, final HttpRequest originalHttpRequest, final int tryCount) { context.setHttpRequest(originalHttpRequest.copy()); return next.clone().process() .flatMap(httpResponse -> { if (shouldRetry(httpResponse, tryCount)) { final Duration delayDuration = determineDelayDuration(httpResponse, tryCount); logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount, delayDuration.getSeconds()); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(delayDuration); } else { return Mono.just(httpResponse); } }) .onErrorResume(err -> { int maxRetries = retryPolicyOptions.getRetryStrategy().getMaxRetries(); if (tryCount < maxRetries) { logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err); return attemptAsync(context, next, 
originalHttpRequest, tryCount + 1) .delaySubscription(retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount)); } else { return Mono.error(new RuntimeException( String.format("Max retries %d times exceeded. Error Details: %s", maxRetries, err.getMessage()), err)); } }); } private boolean shouldRetry(HttpResponse response, int tryCount) { return tryCount < retryPolicyOptions.getRetryStrategy().getMaxRetries() && retryPolicyOptions.getRetryStrategy() .shouldRetry(response); } /** * Determines the delay duration that should be waited before retrying. * @param response HTTP response * @return If the HTTP response has a retry-after-ms header that will be returned, * otherwise the duration used during the construction of the policy. */ private Duration determineDelayDuration(HttpResponse response, int tryCount) { int code = response.getStatusCode(); if (code != 429 && code != 503) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } String retryHeaderValue = null; if (!isNullOrEmpty(retryPolicyOptions.getRetryAfterHeader())) { retryHeaderValue = response.getHeaderValue(retryPolicyOptions.getRetryAfterHeader()); } if (isNullOrEmpty(retryHeaderValue)) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } return Duration.of(Integer.parseInt(retryHeaderValue), retryPolicyOptions.getRetryAfterTimeUnit()); } }
class RetryPolicy implements HttpPipelinePolicy { private final ClientLogger logger = new ClientLogger(RetryPolicy.class); private final RetryPolicyOptions retryPolicyOptions; /** * Creates {@link RetryPolicy} with default {@link ExponentialBackoff} as {@link RetryStrategy}and use * 'retry-after-ms' in {@link HttpResponse} header for calculating retry delay. */ public RetryPolicy() { this(new RetryPolicyOptions(new ExponentialBackoff())); } /** * Creates a RetryPolicy with the provided {@link RetryStrategy}. * * @param retryStrategy The {@link RetryStrategy} used for retries. */ public RetryPolicy(RetryStrategy retryStrategy) { Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null"); this.retryPolicyOptions = new RetryPolicyOptions(retryStrategy); } /** * Creates a {@link RetryPolicy} with the provided {@link RetryPolicyOptions}. * * @param retryPolicyOptions with given {@link RetryPolicyOptions}. * @throws NullPointerException if {@code retryPolicyOptions} or {@code retryPolicyOptions getRetryStrategy } * is {@code null}. 
*/ @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { return attemptAsync(context, next, context.getHttpRequest(), 0); } private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next, final HttpRequest originalHttpRequest, final int tryCount) { context.setHttpRequest(originalHttpRequest.copy()); return next.clone().process() .flatMap(httpResponse -> { if (shouldRetry(httpResponse, tryCount)) { final Duration delayDuration = determineDelayDuration(httpResponse, tryCount); logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount, delayDuration.getSeconds()); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(delayDuration); } else { return Mono.just(httpResponse); } }) .onErrorResume(err -> { int maxRetries = retryPolicyOptions.getRetryStrategy().getMaxRetries(); if (tryCount < maxRetries) { logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount)); } else { return Mono.error(new RuntimeException( String.format("Max retries %d times exceeded. Error Details: %s", maxRetries, err.getMessage()), err)); } }); } private boolean shouldRetry(HttpResponse response, int tryCount) { return tryCount < retryPolicyOptions.getRetryStrategy().getMaxRetries() && retryPolicyOptions.getRetryStrategy().shouldRetry(response); } /** * Determines the delay duration that should be waited before retrying. * @param response HTTP response * @return If the HTTP response has a retry-after-ms header that will be returned, * otherwise the duration used during the construction of the policy. 
*/ private Duration determineDelayDuration(HttpResponse response, int tryCount) { int code = response.getStatusCode(); if (code != 429 && code != 503) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } String retryHeaderValue = null; if (!isNullOrEmpty(retryPolicyOptions.getRetryAfterHeader())) { retryHeaderValue = response.getHeaderValue(retryPolicyOptions.getRetryAfterHeader()); } if (isNullOrEmpty(retryHeaderValue)) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } return Duration.of(Integer.parseInt(retryHeaderValue), retryPolicyOptions.getRetryAfterTimeUnit()); } }
I don't think the default should use `retry-after-ms` for the header, as far as I know this is only used by AppConfiguration.
/**
 * Creates a {@link RetryPolicy} using {@link ExponentialBackoff} as the {@link RetryStrategy}.
 * No response header is consulted for the retry delay: per review, 'retry-after-ms' is a
 * service-specific header (used by App Configuration) and is not a sensible global default,
 * so the strategy alone computes all delays.
 */
public RetryPolicy() {
    this(new RetryPolicyOptions(new ExponentialBackoff()));
}
this(new RetryPolicyOptions(new ExponentialBackoff(), RETRY_AFTER_MS_HEADER, ChronoUnit.MILLIS));
/**
 * Creates a {@link RetryPolicy} using {@link ExponentialBackoff} as the {@link RetryStrategy}.
 * No response header is consulted for the retry delay; the strategy alone computes all delays.
 */
public RetryPolicy() {
    this(new RetryPolicyOptions(new ExponentialBackoff()));
}
class RetryPolicy implements HttpPipelinePolicy { private final ClientLogger logger = new ClientLogger(RetryPolicy.class); private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms"; private final RetryPolicyOptions retryPolicyOptions; /** * Creates {@link RetryPolicy} with default {@link ExponentialBackoff} as {@link RetryStrategy}and use * 'retry-after-ms' in {@link HttpResponse} header for calculating retry delay. */ /** * Creates a RetryPolicy with the provided {@link RetryStrategy}. * * @param retryStrategy The {@link RetryStrategy} used for retries. */ public RetryPolicy(RetryStrategy retryStrategy) { Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null"); this.retryPolicyOptions = new RetryPolicyOptions(retryStrategy); } /** * Creates a {@link RetryPolicy} with the provided {@link RetryPolicyOptions}. * * @param retryPolicyOptions with given {@link RetryPolicyOptions}. * @throws NullPointerException if {@code retryPolicyOptions} or {@code retryPolicyOptions getRetryStrategy } * is {@code null}. 
*/ public RetryPolicy(RetryPolicyOptions retryPolicyOptions) { this.retryPolicyOptions = Objects.requireNonNull(retryPolicyOptions, "'retryPolicyOptions' cannot be null."); Objects.requireNonNull(retryPolicyOptions.getRetryStrategy(), "'retryPolicyOptions.retryStrategy' cannot be null."); } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { return attemptAsync(context, next, context.getHttpRequest(), 0); } private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next, final HttpRequest originalHttpRequest, final int tryCount) { context.setHttpRequest(originalHttpRequest.copy()); return next.clone().process() .flatMap(httpResponse -> { if (shouldRetry(httpResponse, tryCount)) { final Duration delayDuration = determineDelayDuration(httpResponse, tryCount); logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount, delayDuration.getSeconds()); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(delayDuration); } else { return Mono.just(httpResponse); } }) .onErrorResume(err -> { int maxRetries = retryPolicyOptions.getRetryStrategy().getMaxRetries(); if (tryCount < maxRetries) { logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount)); } else { return Mono.error(new RuntimeException( String.format("Max retries %d times exceeded. Error Details: %s", maxRetries, err.getMessage()), err)); } }); } private boolean shouldRetry(HttpResponse response, int tryCount) { return tryCount < retryPolicyOptions.getRetryStrategy().getMaxRetries() && retryPolicyOptions.getRetryStrategy() .shouldRetry(response); } /** * Determines the delay duration that should be waited before retrying. 
* @param response HTTP response * @return If the HTTP response has a retry-after-ms header that will be returned, * otherwise the duration used during the construction of the policy. */ private Duration determineDelayDuration(HttpResponse response, int tryCount) { int code = response.getStatusCode(); if (code != 429 && code != 503) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } String retryHeaderValue = null; if (!isNullOrEmpty(retryPolicyOptions.getRetryAfterHeader())) { retryHeaderValue = response.getHeaderValue(retryPolicyOptions.getRetryAfterHeader()); } if (isNullOrEmpty(retryHeaderValue)) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } return Duration.of(Integer.parseInt(retryHeaderValue), retryPolicyOptions.getRetryAfterTimeUnit()); } }
/**
 * An HTTP pipeline policy that retries a request when the response indicates it should be
 * retried or when the request fails with an error. Retry decisions and delays are delegated to
 * the {@link RetryStrategy} carried by the configured {@link RetryPolicyOptions}; for 429/503
 * responses, a retry delay supplied by the service in a configured response header is honored.
 */
class RetryPolicy implements HttpPipelinePolicy {
    private final ClientLogger logger = new ClientLogger(RetryPolicy.class);
    // Holds the retry strategy plus the optional retry-delay header configuration; never null.
    private final RetryPolicyOptions retryPolicyOptions;

    /**
     * Creates a RetryPolicy with the provided {@link RetryStrategy}. No response header is
     * consulted for retry delays.
     *
     * @param retryStrategy The {@link RetryStrategy} used for retries.
     * @throws NullPointerException if {@code retryStrategy} is {@code null}.
     */
    public RetryPolicy(RetryStrategy retryStrategy) {
        Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null");
        this.retryPolicyOptions = new RetryPolicyOptions(retryStrategy);
    }

    /**
     * Creates a {@link RetryPolicy} with the provided {@link RetryPolicyOptions}.
     *
     * @param retryPolicyOptions The options governing retry behavior.
     * @throws NullPointerException if {@code retryPolicyOptions} or
     * {@code retryPolicyOptions.getRetryStrategy()} is {@code null}.
     */
    public RetryPolicy(RetryPolicyOptions retryPolicyOptions) {
        this.retryPolicyOptions = Objects.requireNonNull(retryPolicyOptions,
            "'retryPolicyOptions' cannot be null.");
        // The strategy is dereferenced on every retry decision; reject a null one up front.
        Objects.requireNonNull(retryPolicyOptions.getRetryStrategy(),
            "'retryPolicyOptions.retryStrategy' cannot be null.");
    }

    /**
     * Sends the request and retries it, per the configured strategy, until it succeeds or the
     * maximum retry count is reached.
     */
    @Override
    public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
        return attemptAsync(context, next, context.getHttpRequest(), 0);
    }

    // One attempt of the request. On a retryable response or an error, recurses with
    // tryCount + 1 after the computed delay; otherwise propagates the response/error.
    private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context,
        final HttpPipelineNextPolicy next, final HttpRequest originalHttpRequest,
        final int tryCount) {
        // Re-copy the original request each attempt so retries are not affected by
        // mutations made to the request on previous attempts.
        context.setHttpRequest(originalHttpRequest.copy());
        return next.clone().process()
            .flatMap(httpResponse -> {
                if (shouldRetry(httpResponse, tryCount)) {
                    final Duration delayDuration = determineDelayDuration(httpResponse, tryCount);
                    logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}",
                        tryCount, delayDuration.getSeconds());
                    // delaySubscription defers the next attempt by the computed delay.
                    return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
                        .delaySubscription(delayDuration);
                } else {
                    return Mono.just(httpResponse);
                }
            })
            .onErrorResume(err -> {
                int maxRetries = retryPolicyOptions.getRetryStrategy().getMaxRetries();
                if (tryCount < maxRetries) {
                    logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err);
                    // Errors have no response headers, so the strategy alone computes the delay.
                    return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
                        .delaySubscription(retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount));
                } else {
                    // Out of retries: surface a terminal error that preserves the cause.
                    return Mono.error(new RuntimeException(
                        String.format("Max retries %d times exceeded. Error Details: %s",
                            maxRetries, err.getMessage()), err));
                }
            });
    }

    // True when the attempt budget is not exhausted and the strategy deems the response retryable.
    private boolean shouldRetry(HttpResponse response, int tryCount) {
        return tryCount < retryPolicyOptions.getRetryStrategy().getMaxRetries()
            && retryPolicyOptions.getRetryStrategy().shouldRetry(response);
    }

    /**
     * Determines the delay duration that should be waited before retrying.
     *
     * <p>For 429 (Too Many Requests) and 503 (Service Unavailable) responses, the configured
     * retry-after header (if any) is consulted first; otherwise the retry strategy computes the
     * delay.
     *
     * @param response HTTP response
     * @return The service-provided delay when the configured retry-after header is present on a
     * 429/503 response, otherwise the delay computed by the retry strategy.
     */
    private Duration determineDelayDuration(HttpResponse response, int tryCount) {
        int code = response.getStatusCode();
        // Only throttling (429) and service-unavailable (503) responses carry a meaningful
        // service-provided delay; everything else uses the strategy.
        if (code != 429 && code != 503) {
            return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount);
        }
        String retryHeaderValue = null;
        if (!isNullOrEmpty(retryPolicyOptions.getRetryAfterHeader())) {
            retryHeaderValue = response.getHeaderValue(retryPolicyOptions.getRetryAfterHeader());
        }
        // Header not configured or not present on the response: fall back to the strategy.
        if (isNullOrEmpty(retryHeaderValue)) {
            return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount);
        }
        // NOTE(review): assumes the header value is a plain integer in the configured time
        // unit; a non-numeric value (e.g. an HTTP-date Retry-After) would throw here.
        return Duration.of(Integer.parseInt(retryHeaderValue),
            retryPolicyOptions.getRetryAfterTimeUnit());
    }
}
Should this be using `requireNonNull` or just use `ExponentialBackoff` if it is null?
/**
 * Creates the {@link RetryPolicyOptions} with provided {@link RetryStrategy},
 * {@code retryAfterHeader} and {@code retryAfterTimeUnit} that will be used when a request is
 * retried.
 *
 * @param retryStrategy The {@link RetryStrategy} used for retries. It will default to
 * {@link ExponentialBackoff} if the provided value is {@code null}.
 * @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to
 * look up for the retry delay. The value {@code null} is valid.
 * @param retryAfterTimeUnit The time unit to use when applying the retry delay. {@code null} is
 * valid if, and only if, {@code retryAfterHeader} is {@code null}.
 * @throws NullPointerException When {@code retryAfterTimeUnit} is {@code null} and
 * {@code retryAfterHeader} is not {@code null}.
 */
public RetryPolicyOptions(RetryStrategy retryStrategy, String retryAfterHeader,
    ChronoUnit retryAfterTimeUnit) {
    // Per review: a null strategy is tolerated and replaced with the standard default
    // rather than throwing, so callers can pass null to mean "use exponential backoff".
    this.retryStrategy = (retryStrategy == null) ? new ExponentialBackoff() : retryStrategy;
    this.retryAfterHeader = retryAfterHeader;
    this.retryAfterTimeUnit = retryAfterTimeUnit;
    // A delay header without a time unit is unusable, so reject that combination eagerly.
    if (!isNullOrEmpty(retryAfterHeader)) {
        Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
    }
}
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
/**
 * Creates the {@link RetryPolicyOptions} with the given {@link RetryStrategy},
 * {@code retryAfterHeader} and {@code retryAfterTimeUnit}.
 *
 * @param retryStrategy The {@link RetryStrategy} used for retries; {@code null} defaults to
 * {@link ExponentialBackoff}.
 * @param retryAfterHeader The HTTP header to look up for the retry delay; may be {@code null}.
 * @param retryAfterTimeUnit The time unit for the header's delay value; required only when
 * {@code retryAfterHeader} is non-empty.
 * @throws NullPointerException When {@code retryAfterTimeUnit} is {@code null} while
 * {@code retryAfterHeader} is not {@code null}.
 */
public RetryPolicyOptions(RetryStrategy retryStrategy, String retryAfterHeader,
    ChronoUnit retryAfterTimeUnit) {
    // Substitute the standard default strategy when none was supplied.
    this.retryStrategy = (retryStrategy != null) ? retryStrategy : new ExponentialBackoff();
    this.retryAfterHeader = retryAfterHeader;
    this.retryAfterTimeUnit = retryAfterTimeUnit;
    // A configured delay header requires a time unit to interpret its value.
    if (!isNullOrEmpty(retryAfterHeader)) {
        Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
    }
}
class RetryPolicyOptions { private final RetryStrategy retryStrategy; private final String retryAfterHeader; private final ChronoUnit retryAfterTimeUnit; /** * Creates a default {@link RetryPolicyOptions} used by a {@link RetryPolicy}. This will use * {@link ExponentialBackoff} as the {@link */ public RetryPolicyOptions() { this(new ExponentialBackoff(), null, null); } /** * Creates the {@link RetryPolicyOptions} with provided {@link RetryStrategy} that will be used when a request is * retried. It will ignore retry delay headers. * * @param retryStrategy The {@link RetryStrategy} used for retries. * @throws NullPointerException if {@code retryStrategy} is {@code null}. */ public RetryPolicyOptions(RetryStrategy retryStrategy) { this(retryStrategy, null, null); } /** * Creates the {@link RetryPolicyOptions} with provided {@code retryAfterHeader} and {@code retryAfterTimeUnit} * that will be used when a request is retried. This will use {@link ExponentialBackoff} as the * {@link * * @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the * retry delay. The value {@code null} is valid. * @param retryAfterTimeUnit The time unit to use when applying the retry delay. {@code null} is valid if, and only * if, {@code retryAfterHeader} is {@code null}. * @throws NullPointerException Only if {@code retryAfterTimeUnit} is {@code null} and {@code retryAfterHeader} * is not {@code null}. */ public RetryPolicyOptions(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) { this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit); } /** * Creates the {@link RetryPolicyOptions} with provided {@link RetryStrategy}, {@code retryAfterHeader} and * {@code retryAfterTimeUnit} that will be used when a request is retried. * * @param retryStrategy The {@link RetryStrategy} used for retries. 
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the * retry delay.The value {@code null} is valid. * @param retryAfterTimeUnit The time unit to use when applying the retry delay. {@code null} is valid if, and only * * if, {@code retryAfterHeader} is {@code null}. * @throws NullPointerException if {@code retryStrategy} is {@code null}. Also when {@code retryAfterTimeUnit} * is {@code null} and {@code retryAfterHeader} is not {@code null}. */ /** * @return The {@link RetryStrategy} used when retrying requests. */ public RetryStrategy getRetryStrategy() { return retryStrategy; } /** * @return The HTTP header which contains the retry delay returned by the service. */ public String getRetryAfterHeader() { return retryAfterHeader; } /** * @return The {@link ChronoUnit} used when applying request retry delays. */ public ChronoUnit getRetryAfterTimeUnit() { return retryAfterTimeUnit; } }
class RetryPolicyOptions { private final RetryStrategy retryStrategy; private final String retryAfterHeader; private final ChronoUnit retryAfterTimeUnit; /** * Creates a default {@link RetryPolicyOptions} used by a {@link RetryPolicy}. This will use * {@link ExponentialBackoff} as the {@link */ public RetryPolicyOptions() { this(new ExponentialBackoff(), null, null); } /** * Creates the {@link RetryPolicyOptions} with provided {@link RetryStrategy} that will be used when a request is * retried. It will ignore retry delay headers. * * @param retryStrategy The {@link RetryStrategy} used for retries. It will default to {@link ExponentialBackoff} * if provided value is {@code null} */ public RetryPolicyOptions(RetryStrategy retryStrategy) { this(retryStrategy, null, null); } /** * Creates the {@link RetryPolicyOptions} with provided {@link RetryStrategy}, {@code retryAfterHeader} and * {@code retryAfterTimeUnit} that will be used when a request is retried. * * @param retryStrategy The {@link RetryStrategy} used for retries. It will default to {@link ExponentialBackoff} * if provided value is {@code null}. * @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the * retry delay. If the value is {@code null}, {@link RetryPolicy} will use the retry strategy to compute the delay * and ignore the delay provided in response header. * @param retryAfterTimeUnit The time unit to use when applying the retry delay. {@code null} is valid if, and only * if, {@code retryAfterHeader} is {@code null}. * @throws NullPointerException When {@code retryAfterTimeUnit} is {@code null} and {@code retryAfterHeader} is * not {@code null}. */ /** * @return The {@link RetryStrategy} used when retrying requests. */ public RetryStrategy getRetryStrategy() { return retryStrategy; } /** * @return The HTTP header which contains the retry delay returned by the service. 
*/ public String getRetryAfterHeader() { return retryAfterHeader; } /** * @return The {@link ChronoUnit} used when applying request retry delays. */ public ChronoUnit getRetryAfterTimeUnit() { return retryAfterTimeUnit; } }
will use ExponentialBackoff
/**
 * Creates the {@link RetryPolicyOptions} with provided {@link RetryStrategy},
 * {@code retryAfterHeader} and {@code retryAfterTimeUnit} that will be used when a request is
 * retried.
 *
 * @param retryStrategy The {@link RetryStrategy} used for retries. It will default to
 * {@link ExponentialBackoff} if the provided value is {@code null}.
 * @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to
 * look up for the retry delay. The value {@code null} is valid.
 * @param retryAfterTimeUnit The time unit to use when applying the retry delay. {@code null} is
 * valid if, and only if, {@code retryAfterHeader} is {@code null}.
 * @throws NullPointerException When {@code retryAfterTimeUnit} is {@code null} and
 * {@code retryAfterHeader} is not {@code null}.
 */
public RetryPolicyOptions(RetryStrategy retryStrategy, String retryAfterHeader,
    ChronoUnit retryAfterTimeUnit) {
    // Per review: a null strategy is tolerated and replaced with the standard default
    // rather than throwing, so callers can pass null to mean "use exponential backoff".
    this.retryStrategy = (retryStrategy == null) ? new ExponentialBackoff() : retryStrategy;
    this.retryAfterHeader = retryAfterHeader;
    this.retryAfterTimeUnit = retryAfterTimeUnit;
    // A delay header without a time unit is unusable, so reject that combination eagerly.
    if (!isNullOrEmpty(retryAfterHeader)) {
        Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
    }
}
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
/**
 * Creates the {@link RetryPolicyOptions} with the given {@link RetryStrategy},
 * {@code retryAfterHeader} and {@code retryAfterTimeUnit}.
 *
 * @param retryStrategy The {@link RetryStrategy} used for retries; {@code null} defaults to
 * {@link ExponentialBackoff}.
 * @param retryAfterHeader The HTTP header to look up for the retry delay; may be {@code null}.
 * @param retryAfterTimeUnit The time unit for the header's delay value; required only when
 * {@code retryAfterHeader} is non-empty.
 * @throws NullPointerException When {@code retryAfterTimeUnit} is {@code null} while
 * {@code retryAfterHeader} is not {@code null}.
 */
public RetryPolicyOptions(RetryStrategy retryStrategy, String retryAfterHeader,
    ChronoUnit retryAfterTimeUnit) {
    // Substitute the standard default strategy when none was supplied.
    this.retryStrategy = (retryStrategy != null) ? retryStrategy : new ExponentialBackoff();
    this.retryAfterHeader = retryAfterHeader;
    this.retryAfterTimeUnit = retryAfterTimeUnit;
    // A configured delay header requires a time unit to interpret its value.
    if (!isNullOrEmpty(retryAfterHeader)) {
        Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
    }
}
class RetryPolicyOptions { private final RetryStrategy retryStrategy; private final String retryAfterHeader; private final ChronoUnit retryAfterTimeUnit; /** * Creates a default {@link RetryPolicyOptions} used by a {@link RetryPolicy}. This will use * {@link ExponentialBackoff} as the {@link */ public RetryPolicyOptions() { this(new ExponentialBackoff(), null, null); } /** * Creates the {@link RetryPolicyOptions} with provided {@link RetryStrategy} that will be used when a request is * retried. It will ignore retry delay headers. * * @param retryStrategy The {@link RetryStrategy} used for retries. * @throws NullPointerException if {@code retryStrategy} is {@code null}. */ public RetryPolicyOptions(RetryStrategy retryStrategy) { this(retryStrategy, null, null); } /** * Creates the {@link RetryPolicyOptions} with provided {@code retryAfterHeader} and {@code retryAfterTimeUnit} * that will be used when a request is retried. This will use {@link ExponentialBackoff} as the * {@link * * @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the * retry delay. The value {@code null} is valid. * @param retryAfterTimeUnit The time unit to use when applying the retry delay. {@code null} is valid if, and only * if, {@code retryAfterHeader} is {@code null}. * @throws NullPointerException Only if {@code retryAfterTimeUnit} is {@code null} and {@code retryAfterHeader} * is not {@code null}. */ public RetryPolicyOptions(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) { this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit); } /** * Creates the {@link RetryPolicyOptions} with provided {@link RetryStrategy}, {@code retryAfterHeader} and * {@code retryAfterTimeUnit} that will be used when a request is retried. * * @param retryStrategy The {@link RetryStrategy} used for retries. 
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the * retry delay.The value {@code null} is valid. * @param retryAfterTimeUnit The time unit to use when applying the retry delay. {@code null} is valid if, and only * * if, {@code retryAfterHeader} is {@code null}. * @throws NullPointerException if {@code retryStrategy} is {@code null}. Also when {@code retryAfterTimeUnit} * is {@code null} and {@code retryAfterHeader} is not {@code null}. */ /** * @return The {@link RetryStrategy} used when retrying requests. */ public RetryStrategy getRetryStrategy() { return retryStrategy; } /** * @return The HTTP header which contains the retry delay returned by the service. */ public String getRetryAfterHeader() { return retryAfterHeader; } /** * @return The {@link ChronoUnit} used when applying request retry delays. */ public ChronoUnit getRetryAfterTimeUnit() { return retryAfterTimeUnit; } }
class RetryPolicyOptions { private final RetryStrategy retryStrategy; private final String retryAfterHeader; private final ChronoUnit retryAfterTimeUnit; /** * Creates a default {@link RetryPolicyOptions} used by a {@link RetryPolicy}. This will use * {@link ExponentialBackoff} as the {@link */ public RetryPolicyOptions() { this(new ExponentialBackoff(), null, null); } /** * Creates the {@link RetryPolicyOptions} with provided {@link RetryStrategy} that will be used when a request is * retried. It will ignore retry delay headers. * * @param retryStrategy The {@link RetryStrategy} used for retries. It will default to {@link ExponentialBackoff} * if provided value is {@code null} */ public RetryPolicyOptions(RetryStrategy retryStrategy) { this(retryStrategy, null, null); } /** * Creates the {@link RetryPolicyOptions} with provided {@link RetryStrategy}, {@code retryAfterHeader} and * {@code retryAfterTimeUnit} that will be used when a request is retried. * * @param retryStrategy The {@link RetryStrategy} used for retries. It will default to {@link ExponentialBackoff} * if provided value is {@code null}. * @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the * retry delay. If the value is {@code null}, {@link RetryPolicy} will use the retry strategy to compute the delay * and ignore the delay provided in response header. * @param retryAfterTimeUnit The time unit to use when applying the retry delay. {@code null} is valid if, and only * if, {@code retryAfterHeader} is {@code null}. * @throws NullPointerException When {@code retryAfterTimeUnit} is {@code null} and {@code retryAfterHeader} is * not {@code null}. */ /** * @return The {@link RetryStrategy} used when retrying requests. */ public RetryStrategy getRetryStrategy() { return retryStrategy; } /** * @return The HTTP header which contains the retry delay returned by the service. 
*/ public String getRetryAfterHeader() { return retryAfterHeader; } /** * @return The {@link ChronoUnit} used when applying request retry delays. */ public ChronoUnit getRetryAfterTimeUnit() { return retryAfterTimeUnit; } }
Updated AppConfiguration with this header
public RetryPolicy() { this(new RetryPolicyOptions(new ExponentialBackoff(), RETRY_AFTER_MS_HEADER, ChronoUnit.MILLIS)); }
this(new RetryPolicyOptions(new ExponentialBackoff(), RETRY_AFTER_MS_HEADER, ChronoUnit.MILLIS));
public RetryPolicy() { this(new RetryPolicyOptions(new ExponentialBackoff())); }
class RetryPolicy implements HttpPipelinePolicy { private final ClientLogger logger = new ClientLogger(RetryPolicy.class); private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms"; private final RetryPolicyOptions retryPolicyOptions; /** * Creates {@link RetryPolicy} with default {@link ExponentialBackoff} as {@link RetryStrategy}and use * 'retry-after-ms' in {@link HttpResponse} header for calculating retry delay. */ /** * Creates a RetryPolicy with the provided {@link RetryStrategy}. * * @param retryStrategy The {@link RetryStrategy} used for retries. */ public RetryPolicy(RetryStrategy retryStrategy) { Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null"); this.retryPolicyOptions = new RetryPolicyOptions(retryStrategy); } /** * Creates a {@link RetryPolicy} with the provided {@link RetryPolicyOptions}. * * @param retryPolicyOptions with given {@link RetryPolicyOptions}. * @throws NullPointerException if {@code retryPolicyOptions} or {@code retryPolicyOptions getRetryStrategy } * is {@code null}. 
*/ public RetryPolicy(RetryPolicyOptions retryPolicyOptions) { this.retryPolicyOptions = Objects.requireNonNull(retryPolicyOptions, "'retryPolicyOptions' cannot be null."); Objects.requireNonNull(retryPolicyOptions.getRetryStrategy(), "'retryPolicyOptions.retryStrategy' cannot be null."); } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { return attemptAsync(context, next, context.getHttpRequest(), 0); } private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next, final HttpRequest originalHttpRequest, final int tryCount) { context.setHttpRequest(originalHttpRequest.copy()); return next.clone().process() .flatMap(httpResponse -> { if (shouldRetry(httpResponse, tryCount)) { final Duration delayDuration = determineDelayDuration(httpResponse, tryCount); logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount, delayDuration.getSeconds()); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(delayDuration); } else { return Mono.just(httpResponse); } }) .onErrorResume(err -> { int maxRetries = retryPolicyOptions.getRetryStrategy().getMaxRetries(); if (tryCount < maxRetries) { logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount)); } else { return Mono.error(new RuntimeException( String.format("Max retries %d times exceeded. Error Details: %s", maxRetries, err.getMessage()), err)); } }); } private boolean shouldRetry(HttpResponse response, int tryCount) { return tryCount < retryPolicyOptions.getRetryStrategy().getMaxRetries() && retryPolicyOptions.getRetryStrategy() .shouldRetry(response); } /** * Determines the delay duration that should be waited before retrying. 
* @param response HTTP response * @return If the HTTP response has a retry-after-ms header that will be returned, * otherwise the duration used during the construction of the policy. */ private Duration determineDelayDuration(HttpResponse response, int tryCount) { int code = response.getStatusCode(); if (code != 429 && code != 503) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } String retryHeaderValue = null; if (!isNullOrEmpty(retryPolicyOptions.getRetryAfterHeader())) { retryHeaderValue = response.getHeaderValue(retryPolicyOptions.getRetryAfterHeader()); } if (isNullOrEmpty(retryHeaderValue)) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } return Duration.of(Integer.parseInt(retryHeaderValue), retryPolicyOptions.getRetryAfterTimeUnit()); } }
class RetryPolicy implements HttpPipelinePolicy { private final ClientLogger logger = new ClientLogger(RetryPolicy.class); private final RetryPolicyOptions retryPolicyOptions; /** * Creates {@link RetryPolicy} with default {@link ExponentialBackoff} as {@link RetryStrategy}and use * 'retry-after-ms' in {@link HttpResponse} header for calculating retry delay. */ /** * Creates a RetryPolicy with the provided {@link RetryStrategy}. * * @param retryStrategy The {@link RetryStrategy} used for retries. */ public RetryPolicy(RetryStrategy retryStrategy) { Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null"); this.retryPolicyOptions = new RetryPolicyOptions(retryStrategy); } /** * Creates a {@link RetryPolicy} with the provided {@link RetryPolicyOptions}. * * @param retryPolicyOptions with given {@link RetryPolicyOptions}. * @throws NullPointerException if {@code retryPolicyOptions} or {@code retryPolicyOptions getRetryStrategy } * is {@code null}. */ public RetryPolicy(RetryPolicyOptions retryPolicyOptions) { this.retryPolicyOptions = Objects.requireNonNull(retryPolicyOptions, "'retryPolicyOptions' cannot be null."); Objects.requireNonNull(retryPolicyOptions.getRetryStrategy(), "'retryPolicyOptions.retryStrategy' cannot be null."); } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { return attemptAsync(context, next, context.getHttpRequest(), 0); } private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next, final HttpRequest originalHttpRequest, final int tryCount) { context.setHttpRequest(originalHttpRequest.copy()); return next.clone().process() .flatMap(httpResponse -> { if (shouldRetry(httpResponse, tryCount)) { final Duration delayDuration = determineDelayDuration(httpResponse, tryCount); logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount, delayDuration.getSeconds()); return attemptAsync(context, next, 
originalHttpRequest, tryCount + 1) .delaySubscription(delayDuration); } else { return Mono.just(httpResponse); } }) .onErrorResume(err -> { int maxRetries = retryPolicyOptions.getRetryStrategy().getMaxRetries(); if (tryCount < maxRetries) { logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount)); } else { return Mono.error(new RuntimeException( String.format("Max retries %d times exceeded. Error Details: %s", maxRetries, err.getMessage()), err)); } }); } private boolean shouldRetry(HttpResponse response, int tryCount) { return tryCount < retryPolicyOptions.getRetryStrategy().getMaxRetries() && retryPolicyOptions.getRetryStrategy().shouldRetry(response); } /** * Determines the delay duration that should be waited before retrying. * @param response HTTP response * @return If the HTTP response has a retry-after-ms header that will be returned, * otherwise the duration used during the construction of the policy. */ private Duration determineDelayDuration(HttpResponse response, int tryCount) { int code = response.getStatusCode(); if (code != 429 && code != 503) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } String retryHeaderValue = null; if (!isNullOrEmpty(retryPolicyOptions.getRetryAfterHeader())) { retryHeaderValue = response.getHeaderValue(retryPolicyOptions.getRetryAfterHeader()); } if (isNullOrEmpty(retryHeaderValue)) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } return Duration.of(Integer.parseInt(retryHeaderValue), retryPolicyOptions.getRetryAfterTimeUnit()); } }
nit: this is an odd line break. Would expect it to be before `&&`.
private boolean shouldRetry(HttpResponse response, int tryCount) { return tryCount < retryPolicyOptions.getRetryStrategy().getMaxRetries() && retryPolicyOptions.getRetryStrategy() .shouldRetry(response); }
return tryCount < retryPolicyOptions.getRetryStrategy().getMaxRetries() && retryPolicyOptions.getRetryStrategy()
private boolean shouldRetry(HttpResponse response, int tryCount) { return tryCount < retryPolicyOptions.getRetryStrategy().getMaxRetries() && retryPolicyOptions.getRetryStrategy().shouldRetry(response); }
class RetryPolicy implements HttpPipelinePolicy { private final ClientLogger logger = new ClientLogger(RetryPolicy.class); private final RetryPolicyOptions retryPolicyOptions; /** * Creates {@link RetryPolicy} with default {@link ExponentialBackoff} as {@link RetryStrategy}and use * 'retry-after-ms' in {@link HttpResponse} header for calculating retry delay. */ public RetryPolicy() { this(new RetryPolicyOptions(new ExponentialBackoff())); } /** * Creates a RetryPolicy with the provided {@link RetryStrategy}. * * @param retryStrategy The {@link RetryStrategy} used for retries. */ public RetryPolicy(RetryStrategy retryStrategy) { Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null"); this.retryPolicyOptions = new RetryPolicyOptions(retryStrategy); } /** * Creates a {@link RetryPolicy} with the provided {@link RetryPolicyOptions}. * * @param retryPolicyOptions with given {@link RetryPolicyOptions}. * @throws NullPointerException if {@code retryPolicyOptions} or {@code retryPolicyOptions getRetryStrategy } * is {@code null}. 
*/ public RetryPolicy(RetryPolicyOptions retryPolicyOptions) { this.retryPolicyOptions = Objects.requireNonNull(retryPolicyOptions, "'retryPolicyOptions' cannot be null."); Objects.requireNonNull(retryPolicyOptions.getRetryStrategy(), "'retryPolicyOptions.retryStrategy' cannot be null."); } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { return attemptAsync(context, next, context.getHttpRequest(), 0); } private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next, final HttpRequest originalHttpRequest, final int tryCount) { context.setHttpRequest(originalHttpRequest.copy()); return next.clone().process() .flatMap(httpResponse -> { if (shouldRetry(httpResponse, tryCount)) { final Duration delayDuration = determineDelayDuration(httpResponse, tryCount); logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount, delayDuration.getSeconds()); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(delayDuration); } else { return Mono.just(httpResponse); } }) .onErrorResume(err -> { int maxRetries = retryPolicyOptions.getRetryStrategy().getMaxRetries(); if (tryCount < maxRetries) { logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount)); } else { return Mono.error(new RuntimeException( String.format("Max retries %d times exceeded. Error Details: %s", maxRetries, err.getMessage()), err)); } }); } /** * Determines the delay duration that should be waited before retrying. * @param response HTTP response * @return If the HTTP response has a retry-after-ms header that will be returned, * otherwise the duration used during the construction of the policy. 
*/ private Duration determineDelayDuration(HttpResponse response, int tryCount) { int code = response.getStatusCode(); if (code != 429 && code != 503) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } String retryHeaderValue = null; if (!isNullOrEmpty(retryPolicyOptions.getRetryAfterHeader())) { retryHeaderValue = response.getHeaderValue(retryPolicyOptions.getRetryAfterHeader()); } if (isNullOrEmpty(retryHeaderValue)) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } return Duration.of(Integer.parseInt(retryHeaderValue), retryPolicyOptions.getRetryAfterTimeUnit()); } }
class RetryPolicy implements HttpPipelinePolicy { private final ClientLogger logger = new ClientLogger(RetryPolicy.class); private final RetryPolicyOptions retryPolicyOptions; /** * Creates {@link RetryPolicy} with default {@link ExponentialBackoff} as {@link RetryStrategy}and use * 'retry-after-ms' in {@link HttpResponse} header for calculating retry delay. */ public RetryPolicy() { this(new RetryPolicyOptions(new ExponentialBackoff())); } /** * Creates a RetryPolicy with the provided {@link RetryStrategy}. * * @param retryStrategy The {@link RetryStrategy} used for retries. */ public RetryPolicy(RetryStrategy retryStrategy) { Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null"); this.retryPolicyOptions = new RetryPolicyOptions(retryStrategy); } /** * Creates a {@link RetryPolicy} with the provided {@link RetryPolicyOptions}. * * @param retryPolicyOptions with given {@link RetryPolicyOptions}. * @throws NullPointerException if {@code retryPolicyOptions} or {@code retryPolicyOptions getRetryStrategy } * is {@code null}. 
*/ public RetryPolicy(RetryPolicyOptions retryPolicyOptions) { this.retryPolicyOptions = Objects.requireNonNull(retryPolicyOptions, "'retryPolicyOptions' cannot be null."); Objects.requireNonNull(retryPolicyOptions.getRetryStrategy(), "'retryPolicyOptions.retryStrategy' cannot be null."); } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { return attemptAsync(context, next, context.getHttpRequest(), 0); } private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next, final HttpRequest originalHttpRequest, final int tryCount) { context.setHttpRequest(originalHttpRequest.copy()); return next.clone().process() .flatMap(httpResponse -> { if (shouldRetry(httpResponse, tryCount)) { final Duration delayDuration = determineDelayDuration(httpResponse, tryCount); logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount, delayDuration.getSeconds()); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(delayDuration); } else { return Mono.just(httpResponse); } }) .onErrorResume(err -> { int maxRetries = retryPolicyOptions.getRetryStrategy().getMaxRetries(); if (tryCount < maxRetries) { logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount)); } else { return Mono.error(new RuntimeException( String.format("Max retries %d times exceeded. Error Details: %s", maxRetries, err.getMessage()), err)); } }); } /** * Determines the delay duration that should be waited before retrying. * @param response HTTP response * @return If the HTTP response has a retry-after-ms header that will be returned, * otherwise the duration used during the construction of the policy. 
*/ private Duration determineDelayDuration(HttpResponse response, int tryCount) { int code = response.getStatusCode(); if (code != 429 && code != 503) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } String retryHeaderValue = null; if (!isNullOrEmpty(retryPolicyOptions.getRetryAfterHeader())) { retryHeaderValue = response.getHeaderValue(retryPolicyOptions.getRetryAfterHeader()); } if (isNullOrEmpty(retryHeaderValue)) { return retryPolicyOptions.getRetryStrategy().calculateRetryDelay(tryCount); } return Duration.of(Integer.parseInt(retryHeaderValue), retryPolicyOptions.getRetryAfterTimeUnit()); } }
Since RetryPolicyOptions is immutable, to save allocations, I'd have a default RetryPolicyOptions that is static and immutable.
public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; ConfigurationClientCredentials configurationCredentials = getConfigurationCredentials(buildConfiguration); String buildEndpoint = getBuildEndpoint(configurationCredentials); Objects.requireNonNull(buildEndpoint); ConfigurationServiceVersion serviceVersion = version != null ? version : ConfigurationServiceVersion.getLatest(); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } ConfigurationClientCredentials buildCredential = (credential == null) ? configurationCredentials : credential; if (buildCredential == null) { throw logger.logExceptionAsWarning(new IllegalStateException("'credential' is required.")); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add( new UserAgentPolicy(clientName, clientVersion, buildConfiguration, serviceVersion)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); policies.add(new ConfigurationCredentialsPolicy(buildCredential)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? new RetryPolicy( new RetryPolicyOptions(new ExponentialBackoff(), RETRY_AFTER_MS_HEADER, ChronoUnit.MILLIS)) : retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); }
policies.add(retryPolicy == null ? new RetryPolicy( new RetryPolicyOptions(new ExponentialBackoff(),
public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; ConfigurationClientCredentials configurationCredentials = getConfigurationCredentials(buildConfiguration); String buildEndpoint = getBuildEndpoint(configurationCredentials); Objects.requireNonNull(buildEndpoint); ConfigurationServiceVersion serviceVersion = version != null ? version : ConfigurationServiceVersion.getLatest(); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } ConfigurationClientCredentials buildCredential = (credential == null) ? configurationCredentials : credential; if (buildCredential == null) { throw logger.logExceptionAsWarning(new IllegalStateException("'credential' is required.")); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add( new UserAgentPolicy(clientName, clientVersion, buildConfiguration, serviceVersion)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); policies.add(new ConfigurationCredentialsPolicy(buildCredential)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private static final String APP_CONFIG_PROPERTIES = "azure-appconfig.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms"; private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final String clientName; private final String clientVersion; private ConfigurationClientCredentials credential; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private ConfigurationServiceVersion version; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES); clientName = properties.getOrDefault(NAME, "UnknownName"); clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. 
* * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. * * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ /** * Sets the service endpoint for the Azure App Configuration instance. * * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. * @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param connectionString Connection string in the format "endpoint={endpoint_value};id={id_value}; * secret={secret_value}" * @return The updated ConfigurationClientBuilder object. 
* @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); try { this.credential = new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException err) { throw logger.logExceptionAsError(new IllegalArgumentException( "The secret is invalid and cannot instantiate the HMAC-SHA256 algorithm.", err)); } catch (NoSuchAlgorithmException err) { throw logger.logExceptionAsError( new IllegalArgumentException("HMAC-SHA256 MAC algorithm cannot be instantiated.", err)); } this.endpoint = credential.getBaseUri(); return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated ConfigurationClientBuilder object. 
*/ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * ConfigurationClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that is used when each request is sent. * * The default retry policy will be used if not provided {@link ConfigurationClientBuilder * to build {@link ConfigurationAsyncClient} or {@link ConfigurationClient}. * @param retryPolicy user's retry policy applied to each request. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder retryPolicy(HttpPipelinePolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ConfigurationServiceVersion} that is used when making API requests. 
* <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link ConfigurationServiceVersion} of the service to be used when making requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder serviceVersion(ConfigurationServiceVersion version) { this.version = version; return this; } private ConfigurationClientCredentials getConfigurationCredentials(Configuration configuration) { String connectionString = configuration.get("AZURE_APPCONFIG_CONNECTION_STRING"); if (CoreUtils.isNullOrEmpty(connectionString)) { return credential; } try { return new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException | NoSuchAlgorithmException ex) { return null; } } private String getBuildEndpoint(ConfigurationClientCredentials buildCredentials) { if (endpoint != null) { return endpoint; } else if (buildCredentials != null) { return buildCredentials.getBaseUri(); } else { return null; } } }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private static final String APP_CONFIG_PROPERTIES = "azure-appconfig.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy( new RetryPolicyOptions(new ExponentialBackoff(), RETRY_AFTER_MS_HEADER, ChronoUnit.MILLIS)); private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final String clientName; private final String clientVersion; private ConfigurationClientCredentials credential; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private ConfigurationServiceVersion version; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES); clientName = properties.getOrDefault(NAME, "UnknownName"); clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. 
Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. * * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. * * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ /** * Sets the service endpoint for the Azure App Configuration instance. * * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. * @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. 
* * @param connectionString Connection string in the format "endpoint={endpoint_value};id={id_value}; * secret={secret_value}" * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); try { this.credential = new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException err) { throw logger.logExceptionAsError(new IllegalArgumentException( "The secret is invalid and cannot instantiate the HMAC-SHA256 algorithm.", err)); } catch (NoSuchAlgorithmException err) { throw logger.logExceptionAsError( new IllegalArgumentException("HMAC-SHA256 MAC algorithm cannot be instantiated.", err)); } this.endpoint = credential.getBaseUri(); return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated ConfigurationClientBuilder object. 
*/ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * ConfigurationClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that is used when each request is sent. * * The default retry policy will be used if not provided {@link ConfigurationClientBuilder * to build {@link ConfigurationAsyncClient} or {@link ConfigurationClient}. * @param retryPolicy user's retry policy applied to each request. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder retryPolicy(HttpPipelinePolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ConfigurationServiceVersion} that is used when making API requests. 
* <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link ConfigurationServiceVersion} of the service to be used when making requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder serviceVersion(ConfigurationServiceVersion version) { this.version = version; return this; } private ConfigurationClientCredentials getConfigurationCredentials(Configuration configuration) { String connectionString = configuration.get("AZURE_APPCONFIG_CONNECTION_STRING"); if (CoreUtils.isNullOrEmpty(connectionString)) { return credential; } try { return new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException | NoSuchAlgorithmException ex) { return null; } } private String getBuildEndpoint(ConfigurationClientCredentials buildCredentials) { if (endpoint != null) { return endpoint; } else if (buildCredentials != null) { return buildCredentials.getBaseUri(); } else { return null; } } }
`logger` will never be `null`. Don't need the null check here.
/**
 * Determines whether a message at the given log level should be logged, based on both the
 * environment-configured minimum level and the state of the underlying SLF4J logger.
 *
 * @param logLevel The level of the log message being attempted.
 * @param environmentLoggingLevel The minimum log level enabled through environment configuration.
 * @return {@code true} if logging is enabled at {@code logLevel}, {@code false} otherwise.
 */
private boolean canLogAtLevel(int logLevel, int environmentLoggingLevel) {
    // NOTE(review): 'logger' is initialized in the constructor and is never null, so the
    // previous 'logger == null' guard was redundant and has been removed.
    if (logLevel < environmentLoggingLevel) {
        return false;
    }
    switch (logLevel) {
        case VERBOSE_LEVEL:
            return logger.isDebugEnabled();
        case INFORMATIONAL_LEVEL:
            return logger.isInfoEnabled();
        case WARNING_LEVEL:
            return logger.isWarnEnabled();
        case ERROR_LEVEL:
            return logger.isErrorEnabled();
        default:
            // Unknown levels are never logged.
            return false;
    }
}
if (logLevel < environmentLoggingLevel || logger == null) {
/**
 * Checks whether logging is permitted at {@code logLevel}, honoring both the environment's
 * configured threshold and the capability reported by the underlying SLF4J logger.
 *
 * @param logLevel The level of the message to be logged.
 * @param environmentLoggingLevel The minimum level allowed by the environment configuration.
 * @return Whether a message at {@code logLevel} may be logged.
 */
private boolean canLogAtLevel(int logLevel, int environmentLoggingLevel) {
    // Anything below the environment threshold is suppressed outright.
    if (logLevel < environmentLoggingLevel) {
        return false;
    }

    // Delegate the final decision to the backing logger for the requested level.
    if (logLevel == VERBOSE_LEVEL) {
        return logger.isDebugEnabled();
    } else if (logLevel == INFORMATIONAL_LEVEL) {
        return logger.isInfoEnabled();
    } else if (logLevel == WARNING_LEVEL) {
        return logger.isWarnEnabled();
    } else if (logLevel == ERROR_LEVEL) {
        return logger.isErrorEnabled();
    }

    // Unrecognized levels are never loggable.
    return false;
}
class name using the {@link LoggerFactory}
class name using the {@link LoggerFactory}
This may have been done in the past for SpotBugs, will investigate.
/**
 * Determines whether a message at the given log level should be logged, based on both the
 * environment-configured minimum level and the state of the underlying SLF4J logger.
 *
 * @param logLevel The level of the log message being attempted.
 * @param environmentLoggingLevel The minimum log level enabled through environment configuration.
 * @return {@code true} if logging is enabled at {@code logLevel}, {@code false} otherwise.
 */
private boolean canLogAtLevel(int logLevel, int environmentLoggingLevel) {
    // NOTE(review): 'logger' is initialized in the constructor and is never null, so the
    // previous 'logger == null' guard was redundant and has been removed.
    if (logLevel < environmentLoggingLevel) {
        return false;
    }
    switch (logLevel) {
        case VERBOSE_LEVEL:
            return logger.isDebugEnabled();
        case INFORMATIONAL_LEVEL:
            return logger.isInfoEnabled();
        case WARNING_LEVEL:
            return logger.isWarnEnabled();
        case ERROR_LEVEL:
            return logger.isErrorEnabled();
        default:
            // Unknown levels are never logged.
            return false;
    }
}
if (logLevel < environmentLoggingLevel || logger == null) {
/**
 * Checks whether logging is permitted at {@code logLevel}, honoring both the environment's
 * configured threshold and the capability reported by the underlying SLF4J logger.
 *
 * @param logLevel The level of the message to be logged.
 * @param environmentLoggingLevel The minimum level allowed by the environment configuration.
 * @return Whether a message at {@code logLevel} may be logged.
 */
private boolean canLogAtLevel(int logLevel, int environmentLoggingLevel) {
    // Anything below the environment threshold is suppressed outright.
    if (logLevel < environmentLoggingLevel) {
        return false;
    }

    // Delegate the final decision to the backing logger for the requested level.
    if (logLevel == VERBOSE_LEVEL) {
        return logger.isDebugEnabled();
    } else if (logLevel == INFORMATIONAL_LEVEL) {
        return logger.isInfoEnabled();
    } else if (logLevel == WARNING_LEVEL) {
        return logger.isWarnEnabled();
    } else if (logLevel == ERROR_LEVEL) {
        return logger.isErrorEnabled();
    }

    // Unrecognized levels are never loggable.
    return false;
}
class name using the {@link LoggerFactory}
class name using the {@link LoggerFactory}
I think the logic here could be simplified; right now it performs the same `contains(" ")` check multiple times. ``` java if (!CoreUtils.isNullOrEmpty(applicationId)) { // Would an issue arise if the application ID is an empty string? Should we disallow that? if (applicationId.contains(" ")) { // throw error } else if (applicationId.length() > 24) { // throw error } else { this.applicationId = applicationId; } } return this; ```
/**
 * Sets the custom application-specific ID supplied by the user of the client library.
 * <p>
 * A {@code null} or empty ID is ignored and leaves any previously configured value in place.
 *
 * @param applicationId The user-specified application ID; must be at most
 *     {@code MAX_APPLICATION_ID_LENGTH} characters and contain no spaces.
 * @return The updated HttpLogOptions object.
 * @throws IllegalArgumentException If {@code applicationId} exceeds the maximum length or
 *     contains a space.
 */
public HttpLogOptions setApplicationId(final String applicationId) {
    // Simplified from the previous compound condition, which evaluated
    // applicationId.contains(" ") twice. Each constraint is now checked exactly once.
    if (applicationId != null && !applicationId.isEmpty()) {
        if (applicationId.length() > MAX_APPLICATION_ID_LENGTH) {
            throw logger
                .logExceptionAsError(new IllegalArgumentException("'applicationId' length cannot be greater than "
                    + MAX_APPLICATION_ID_LENGTH));
        } else if (applicationId.contains(" ")) {
            throw logger
                .logExceptionAsError(new IllegalArgumentException("'applicationId' must not contain a space."));
        } else {
            this.applicationId = applicationId;
        }
    }
    return this;
}
if (applicationId != null
/**
 * Sets the custom application-specific ID supplied by the user of the client library.
 * <p>
 * Null or empty IDs are ignored; a previously configured value is retained.
 *
 * @param applicationId The user-specified application ID.
 * @return The updated HttpLogOptions object.
 * @throws IllegalArgumentException If {@code applicationId} is longer than the maximum
 *     allowed length or contains a space.
 */
public HttpLogOptions setApplicationId(final String applicationId) {
    // Guard-clause style: bail out early on absent input, validate, then assign.
    if (CoreUtils.isNullOrEmpty(applicationId)) {
        return this;
    }

    if (applicationId.length() > MAX_APPLICATION_ID_LENGTH) {
        throw logger
            .logExceptionAsError(new IllegalArgumentException("'applicationId' length cannot be greater than "
                + MAX_APPLICATION_ID_LENGTH));
    }

    if (applicationId.contains(" ")) {
        throw logger
            .logExceptionAsError(new IllegalArgumentException("'applicationId' must not contain a space."));
    }

    this.applicationId = applicationId;
    return this;
}
class HttpLogOptions { private String applicationId; private HttpLogDetailLevel logLevel; private Set<String> allowedHeaderNames; private Set<String> allowedQueryParamNames; private final ClientLogger logger = new ClientLogger(HttpLogOptions.class); private static final int MAX_APPLICATION_ID_LENGTH = 24; private static final List<String> DEFAULT_HEADERS_WHITELIST = Arrays.asList( "x-ms-client-request-id", "x-ms-return-client-request-id", "traceparent", "Accept", "Cache-Control", "Connection", "Content-Length", "Content-Type", "Date", "ETag", "Expires", "If-Match", "If-Modified-Since", "If-None-Match", "If-Unmodified-Since", "Last-Modified", "Pragma", "Request-Id", "Retry-After", "Server", "Transfer-Encoding", "User-Agent" ); /** * Creates a new instance that does not log any information about HTTP requests or responses. */ public HttpLogOptions() { logLevel = HttpLogDetailLevel.NONE; allowedHeaderNames = new HashSet<>(DEFAULT_HEADERS_WHITELIST); allowedQueryParamNames = new HashSet<>(); applicationId = null; } /** * Gets the level of detail to log on HTTP messages. * * @return The {@link HttpLogDetailLevel}. */ public HttpLogDetailLevel getLogLevel() { return logLevel; } /** * Sets the level of detail to log on Http messages. * * <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logLevel The {@link HttpLogDetailLevel}. * @return The updated HttpLogOptions object. */ public HttpLogOptions setLogLevel(final HttpLogDetailLevel logLevel) { this.logLevel = logLevel == null ? HttpLogDetailLevel.NONE : logLevel; return this; } /** * Gets the application specific id. * * @return The application specific id. */ public String getApplicationId() { return applicationId; } /** * Sets the custom application specific id supplied by the user of the client library. * * @param applicationId The user specified application id. * @return The updated HttpLogOptions object. */ /** * Gets the whitelisted headers that should be logged. 
* * @return The list of whitelisted headers. */ public Set<String> getAllowedHeaderNames() { return allowedHeaderNames; } /** * Sets the given whitelisted headers that should be logged. * * <p> * This method sets the provided header names to be the whitelisted header names which will be logged for all HTTP * requests and responses, overwriting any previously configured headers, including the default set. Additionally, * users can use {@link HttpLogOptions * {@link HttpLogOptions * allowed header names. * </p> * * @param allowedHeaderNames The list of whitelisted header names from the user. * @return The updated HttpLogOptions object. */ public HttpLogOptions setAllowedHeaderNames(final Set<String> allowedHeaderNames) { this.allowedHeaderNames = allowedHeaderNames == null ? new HashSet<>() : allowedHeaderNames; return this; } /** * Sets the given whitelisted header to the default header set that should be logged. * * @param allowedHeaderName The whitelisted header name from the user. * @return The updated HttpLogOptions object. * @throws NullPointerException If {@code allowedHeaderName} is {@code null}. */ public HttpLogOptions addAllowedHeaderName(final String allowedHeaderName) { Objects.requireNonNull(allowedHeaderName); this.allowedHeaderNames.add(allowedHeaderName); return this; } /** * Gets the whitelisted query parameters. * * @return The list of whitelisted query parameters. */ public Set<String> getAllowedQueryParamNames() { return allowedQueryParamNames; } /** * Sets the given whitelisted query params to be displayed in the logging info. * * @param allowedQueryParamNames The list of whitelisted query params from the user. * @return The updated HttpLogOptions object. */ public HttpLogOptions setAllowedQueryParamNames(final Set<String> allowedQueryParamNames) { this.allowedQueryParamNames = allowedQueryParamNames == null ? new HashSet<>() : allowedQueryParamNames; return this; } /** * Sets the given whitelisted query param that should be logged. 
* * @param allowedQueryParamName The whitelisted query param name from the user. * @return The updated HttpLogOptions object. * @throws NullPointerException If {@code allowedQueryParamName} is {@code null}. */ public HttpLogOptions addAllowedQueryParamName(final String allowedQueryParamName) { this.allowedQueryParamNames.add(allowedQueryParamName); return this; } }
class HttpLogOptions { private String applicationId; private HttpLogDetailLevel logLevel; private Set<String> allowedHeaderNames; private Set<String> allowedQueryParamNames; private final ClientLogger logger = new ClientLogger(HttpLogOptions.class); private static final int MAX_APPLICATION_ID_LENGTH = 24; private static final List<String> DEFAULT_HEADERS_WHITELIST = Arrays.asList( "x-ms-client-request-id", "x-ms-return-client-request-id", "traceparent", "Accept", "Cache-Control", "Connection", "Content-Length", "Content-Type", "Date", "ETag", "Expires", "If-Match", "If-Modified-Since", "If-None-Match", "If-Unmodified-Since", "Last-Modified", "Pragma", "Request-Id", "Retry-After", "Server", "Transfer-Encoding", "User-Agent" ); /** * Creates a new instance that does not log any information about HTTP requests or responses. */ public HttpLogOptions() { logLevel = HttpLogDetailLevel.NONE; allowedHeaderNames = new HashSet<>(DEFAULT_HEADERS_WHITELIST); allowedQueryParamNames = new HashSet<>(); applicationId = null; } /** * Gets the level of detail to log on HTTP messages. * * @return The {@link HttpLogDetailLevel}. */ public HttpLogDetailLevel getLogLevel() { return logLevel; } /** * Sets the level of detail to log on Http messages. * * <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logLevel The {@link HttpLogDetailLevel}. * @return The updated HttpLogOptions object. */ public HttpLogOptions setLogLevel(final HttpLogDetailLevel logLevel) { this.logLevel = logLevel == null ? HttpLogDetailLevel.NONE : logLevel; return this; } /** * Gets the whitelisted headers that should be logged. * * @return The list of whitelisted headers. */ public Set<String> getAllowedHeaderNames() { return allowedHeaderNames; } /** * Sets the given whitelisted headers that should be logged. 
* * <p> * This method sets the provided header names to be the whitelisted header names which will be logged for all HTTP * requests and responses, overwriting any previously configured headers, including the default set. Additionally, * users can use {@link HttpLogOptions * {@link HttpLogOptions * allowed header names. * </p> * * @param allowedHeaderNames The list of whitelisted header names from the user. * @return The updated HttpLogOptions object. */ public HttpLogOptions setAllowedHeaderNames(final Set<String> allowedHeaderNames) { this.allowedHeaderNames = allowedHeaderNames == null ? new HashSet<>() : allowedHeaderNames; return this; } /** * Sets the given whitelisted header to the default header set that should be logged. * * @param allowedHeaderName The whitelisted header name from the user. * @return The updated HttpLogOptions object. * @throws NullPointerException If {@code allowedHeaderName} is {@code null}. */ public HttpLogOptions addAllowedHeaderName(final String allowedHeaderName) { Objects.requireNonNull(allowedHeaderName); this.allowedHeaderNames.add(allowedHeaderName); return this; } /** * Gets the whitelisted query parameters. * * @return The list of whitelisted query parameters. */ public Set<String> getAllowedQueryParamNames() { return allowedQueryParamNames; } /** * Sets the given whitelisted query params to be displayed in the logging info. * * @param allowedQueryParamNames The list of whitelisted query params from the user. * @return The updated HttpLogOptions object. */ public HttpLogOptions setAllowedQueryParamNames(final Set<String> allowedQueryParamNames) { this.allowedQueryParamNames = allowedQueryParamNames == null ? new HashSet<>() : allowedQueryParamNames; return this; } /** * Sets the given whitelisted query param that should be logged. * * @param allowedQueryParamName The whitelisted query param name from the user. * @return The updated HttpLogOptions object. 
* @throws NullPointerException If {@code allowedQueryParamName} is {@code null}. */ public HttpLogOptions addAllowedQueryParamName(final String allowedQueryParamName) { this.allowedQueryParamNames.add(allowedQueryParamName); return this; } /** * Gets the application specific id. * * @return The application specific id. */ public String getApplicationId() { return applicationId; } /** * Sets the custom application specific id supplied by the user of the client library. * * @param applicationId The user specified application id. * @return The updated HttpLogOptions object. */ }
Could we validate the whole User-Agent string instead of only its prefix?
public void customApplicationIdUserAgentTest() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); String expectedHeaderPrefix = "user_specified_appId"; Assertions.assertTrue(header.startsWith(expectedHeaderPrefix)); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( "user_specified_appId", "package.name", "package_version", Configuration.NONE, () -> "1.0")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); }
Assertions.assertTrue(header.startsWith(expectedHeaderPrefix));
public void customApplicationIdUserAgentTest() throws Exception { final String testSdkName = "sdk.name"; final String testAppId = "user_specified_appId"; final String testPackageVersion = "package_version"; String javaVersion = Configuration.getGlobalConfiguration().get("java.version"); String osName = Configuration.getGlobalConfiguration().get("os.name"); String osVersion = Configuration.getGlobalConfiguration().get("os.version"); String testPlatformInfo = javaVersion + "; " + osName + " " + osVersion; String expectedHeader = testAppId + " " + "azsdk-java-" + testSdkName + "/" + testPackageVersion + " " + testPlatformInfo; final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); assertEquals(header, expectedHeader); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( testAppId, testSdkName, testPackageVersion, Configuration.NONE, () -> "1.0")) .build(); Mono<HttpResponse> response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: StepVerifier.create(response) .expectNextMatches(httpResponse -> { assertEquals(200, httpResponse.getStatusCode()); assertEquals(expectedHeader, httpResponse.getRequest().getHeaders().getValue("User-Agent")); return true; }) .verifyComplete(); }
class UserAgentTests { @Test public void defaultUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { Assertions.assertEquals( request.getHeaders().getValue("User-Agent"), "AutoRest-Java"); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("AutoRest-Java")) .build(); HttpResponse response = pipeline.send(new HttpRequest( HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); } @Test public void customUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); Assertions.assertEquals("Awesome", header); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("Awesome")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); } @Test public void defaultApplicationIdUserAgentTest() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); String expectedHeaderPrefix = "azsdk-java-package.name"; Assertions.assertTrue(header.startsWith(expectedHeaderPrefix)); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( null, "package.name", "package_version", Configuration.NONE, () -> "1.0")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); } @Test }
class UserAgentTests { @Test public void defaultUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { assertEquals( request.getHeaders().getValue("User-Agent"), "AutoRest-Java"); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("AutoRest-Java")) .build(); HttpResponse response = pipeline.send(new HttpRequest( HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); } @Test public void customUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); assertEquals("Awesome", header); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("Awesome")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); } @Test public void defaultApplicationIdUserAgentTest() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); String expectedHeaderPrefix = "azsdk-java-package.name"; Assertions.assertTrue(header.startsWith(expectedHeaderPrefix)); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( null, "package.name", "package_version", Configuration.NONE, () -> "1.0")) .build(); Mono<HttpResponse> response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: StepVerifier.create(response) .expectNextMatches(httpResponse -> { assertEquals(200, httpResponse.getStatusCode()); 
assertTrue(httpResponse.getRequest().getHeaders().getValue("User-Agent").startsWith("azsdk")); return true; }) .verifyComplete(); } @Test }
Simplified the logic. Currently, we are allowing empty application Id. @JonathanGiles do we want to disallow empty application Id?
/**
 * Sets the custom application-specific ID supplied by the user of the client library.
 * <p>
 * A {@code null} or empty ID is ignored and leaves any previously configured value in place.
 *
 * @param applicationId The user-specified application ID; must be at most
 *     {@code MAX_APPLICATION_ID_LENGTH} characters and contain no spaces.
 * @return The updated HttpLogOptions object.
 * @throws IllegalArgumentException If {@code applicationId} exceeds the maximum length or
 *     contains a space.
 */
public HttpLogOptions setApplicationId(final String applicationId) {
    // Simplified from the previous compound condition, which evaluated
    // applicationId.contains(" ") twice. Each constraint is now checked exactly once.
    if (applicationId != null && !applicationId.isEmpty()) {
        if (applicationId.length() > MAX_APPLICATION_ID_LENGTH) {
            throw logger
                .logExceptionAsError(new IllegalArgumentException("'applicationId' length cannot be greater than "
                    + MAX_APPLICATION_ID_LENGTH));
        } else if (applicationId.contains(" ")) {
            throw logger
                .logExceptionAsError(new IllegalArgumentException("'applicationId' must not contain a space."));
        } else {
            this.applicationId = applicationId;
        }
    }
    return this;
}
if (applicationId != null
/**
 * Sets the custom application-specific ID supplied by the user of the client library.
 * <p>
 * Null or empty IDs are ignored; a previously configured value is retained.
 *
 * @param applicationId The user-specified application ID.
 * @return The updated HttpLogOptions object.
 * @throws IllegalArgumentException If {@code applicationId} is longer than the maximum
 *     allowed length or contains a space.
 */
public HttpLogOptions setApplicationId(final String applicationId) {
    // Guard-clause style: bail out early on absent input, validate, then assign.
    if (CoreUtils.isNullOrEmpty(applicationId)) {
        return this;
    }

    if (applicationId.length() > MAX_APPLICATION_ID_LENGTH) {
        throw logger
            .logExceptionAsError(new IllegalArgumentException("'applicationId' length cannot be greater than "
                + MAX_APPLICATION_ID_LENGTH));
    }

    if (applicationId.contains(" ")) {
        throw logger
            .logExceptionAsError(new IllegalArgumentException("'applicationId' must not contain a space."));
    }

    this.applicationId = applicationId;
    return this;
}
class HttpLogOptions { private String applicationId; private HttpLogDetailLevel logLevel; private Set<String> allowedHeaderNames; private Set<String> allowedQueryParamNames; private final ClientLogger logger = new ClientLogger(HttpLogOptions.class); private static final int MAX_APPLICATION_ID_LENGTH = 24; private static final List<String> DEFAULT_HEADERS_WHITELIST = Arrays.asList( "x-ms-client-request-id", "x-ms-return-client-request-id", "traceparent", "Accept", "Cache-Control", "Connection", "Content-Length", "Content-Type", "Date", "ETag", "Expires", "If-Match", "If-Modified-Since", "If-None-Match", "If-Unmodified-Since", "Last-Modified", "Pragma", "Request-Id", "Retry-After", "Server", "Transfer-Encoding", "User-Agent" ); /** * Creates a new instance that does not log any information about HTTP requests or responses. */ public HttpLogOptions() { logLevel = HttpLogDetailLevel.NONE; allowedHeaderNames = new HashSet<>(DEFAULT_HEADERS_WHITELIST); allowedQueryParamNames = new HashSet<>(); applicationId = null; } /** * Gets the level of detail to log on HTTP messages. * * @return The {@link HttpLogDetailLevel}. */ public HttpLogDetailLevel getLogLevel() { return logLevel; } /** * Sets the level of detail to log on Http messages. * * <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logLevel The {@link HttpLogDetailLevel}. * @return The updated HttpLogOptions object. */ public HttpLogOptions setLogLevel(final HttpLogDetailLevel logLevel) { this.logLevel = logLevel == null ? HttpLogDetailLevel.NONE : logLevel; return this; } /** * Gets the application specific id. * * @return The application specific id. */ public String getApplicationId() { return applicationId; } /** * Sets the custom application specific id supplied by the user of the client library. * * @param applicationId The user specified application id. * @return The updated HttpLogOptions object. */ /** * Gets the whitelisted headers that should be logged. 
* * @return The list of whitelisted headers. */ public Set<String> getAllowedHeaderNames() { return allowedHeaderNames; } /** * Sets the given whitelisted headers that should be logged. * * <p> * This method sets the provided header names to be the whitelisted header names which will be logged for all HTTP * requests and responses, overwriting any previously configured headers, including the default set. Additionally, * users can use {@link HttpLogOptions * {@link HttpLogOptions * allowed header names. * </p> * * @param allowedHeaderNames The list of whitelisted header names from the user. * @return The updated HttpLogOptions object. */ public HttpLogOptions setAllowedHeaderNames(final Set<String> allowedHeaderNames) { this.allowedHeaderNames = allowedHeaderNames == null ? new HashSet<>() : allowedHeaderNames; return this; } /** * Sets the given whitelisted header to the default header set that should be logged. * * @param allowedHeaderName The whitelisted header name from the user. * @return The updated HttpLogOptions object. * @throws NullPointerException If {@code allowedHeaderName} is {@code null}. */ public HttpLogOptions addAllowedHeaderName(final String allowedHeaderName) { Objects.requireNonNull(allowedHeaderName); this.allowedHeaderNames.add(allowedHeaderName); return this; } /** * Gets the whitelisted query parameters. * * @return The list of whitelisted query parameters. */ public Set<String> getAllowedQueryParamNames() { return allowedQueryParamNames; } /** * Sets the given whitelisted query params to be displayed in the logging info. * * @param allowedQueryParamNames The list of whitelisted query params from the user. * @return The updated HttpLogOptions object. */ public HttpLogOptions setAllowedQueryParamNames(final Set<String> allowedQueryParamNames) { this.allowedQueryParamNames = allowedQueryParamNames == null ? new HashSet<>() : allowedQueryParamNames; return this; } /** * Sets the given whitelisted query param that should be logged. 
* * @param allowedQueryParamName The whitelisted query param name from the user. * @return The updated HttpLogOptions object. * @throws NullPointerException If {@code allowedQueryParamName} is {@code null}. */ public HttpLogOptions addAllowedQueryParamName(final String allowedQueryParamName) { this.allowedQueryParamNames.add(allowedQueryParamName); return this; } }
class HttpLogOptions { private String applicationId; private HttpLogDetailLevel logLevel; private Set<String> allowedHeaderNames; private Set<String> allowedQueryParamNames; private final ClientLogger logger = new ClientLogger(HttpLogOptions.class); private static final int MAX_APPLICATION_ID_LENGTH = 24; private static final List<String> DEFAULT_HEADERS_WHITELIST = Arrays.asList( "x-ms-client-request-id", "x-ms-return-client-request-id", "traceparent", "Accept", "Cache-Control", "Connection", "Content-Length", "Content-Type", "Date", "ETag", "Expires", "If-Match", "If-Modified-Since", "If-None-Match", "If-Unmodified-Since", "Last-Modified", "Pragma", "Request-Id", "Retry-After", "Server", "Transfer-Encoding", "User-Agent" ); /** * Creates a new instance that does not log any information about HTTP requests or responses. */ public HttpLogOptions() { logLevel = HttpLogDetailLevel.NONE; allowedHeaderNames = new HashSet<>(DEFAULT_HEADERS_WHITELIST); allowedQueryParamNames = new HashSet<>(); applicationId = null; } /** * Gets the level of detail to log on HTTP messages. * * @return The {@link HttpLogDetailLevel}. */ public HttpLogDetailLevel getLogLevel() { return logLevel; } /** * Sets the level of detail to log on Http messages. * * <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logLevel The {@link HttpLogDetailLevel}. * @return The updated HttpLogOptions object. */ public HttpLogOptions setLogLevel(final HttpLogDetailLevel logLevel) { this.logLevel = logLevel == null ? HttpLogDetailLevel.NONE : logLevel; return this; } /** * Gets the whitelisted headers that should be logged. * * @return The list of whitelisted headers. */ public Set<String> getAllowedHeaderNames() { return allowedHeaderNames; } /** * Sets the given whitelisted headers that should be logged. 
* * <p> * This method sets the provided header names to be the whitelisted header names which will be logged for all HTTP * requests and responses, overwriting any previously configured headers, including the default set. Additionally, * users can use {@link HttpLogOptions * {@link HttpLogOptions * allowed header names. * </p> * * @param allowedHeaderNames The list of whitelisted header names from the user. * @return The updated HttpLogOptions object. */ public HttpLogOptions setAllowedHeaderNames(final Set<String> allowedHeaderNames) { this.allowedHeaderNames = allowedHeaderNames == null ? new HashSet<>() : allowedHeaderNames; return this; } /** * Sets the given whitelisted header to the default header set that should be logged. * * @param allowedHeaderName The whitelisted header name from the user. * @return The updated HttpLogOptions object. * @throws NullPointerException If {@code allowedHeaderName} is {@code null}. */ public HttpLogOptions addAllowedHeaderName(final String allowedHeaderName) { Objects.requireNonNull(allowedHeaderName); this.allowedHeaderNames.add(allowedHeaderName); return this; } /** * Gets the whitelisted query parameters. * * @return The list of whitelisted query parameters. */ public Set<String> getAllowedQueryParamNames() { return allowedQueryParamNames; } /** * Sets the given whitelisted query params to be displayed in the logging info. * * @param allowedQueryParamNames The list of whitelisted query params from the user. * @return The updated HttpLogOptions object. */ public HttpLogOptions setAllowedQueryParamNames(final Set<String> allowedQueryParamNames) { this.allowedQueryParamNames = allowedQueryParamNames == null ? new HashSet<>() : allowedQueryParamNames; return this; } /** * Sets the given whitelisted query param that should be logged. * * @param allowedQueryParamName The whitelisted query param name from the user. * @return The updated HttpLogOptions object. 
* @throws NullPointerException If {@code allowedQueryParamName} is {@code null}. */ public HttpLogOptions addAllowedQueryParamName(final String allowedQueryParamName) { this.allowedQueryParamNames.add(allowedQueryParamName); return this; } /** * Gets the application specific id. * * @return The application specific id. */ public String getApplicationId() { return applicationId; } /** * Sets the custom application specific id supplied by the user of the client library. * * @param applicationId The user specified application id. * @return The updated HttpLogOptions object. */ }
added
public void customApplicationIdUserAgentTest() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); String expectedHeaderPrefix = "user_specified_appId"; Assertions.assertTrue(header.startsWith(expectedHeaderPrefix)); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( "user_specified_appId", "package.name", "package_version", Configuration.NONE, () -> "1.0")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); }
Assertions.assertTrue(header.startsWith(expectedHeaderPrefix));
public void customApplicationIdUserAgentTest() throws Exception { final String testSdkName = "sdk.name"; final String testAppId = "user_specified_appId"; final String testPackageVersion = "package_version"; String javaVersion = Configuration.getGlobalConfiguration().get("java.version"); String osName = Configuration.getGlobalConfiguration().get("os.name"); String osVersion = Configuration.getGlobalConfiguration().get("os.version"); String testPlatformInfo = javaVersion + "; " + osName + " " + osVersion; String expectedHeader = testAppId + " " + "azsdk-java-" + testSdkName + "/" + testPackageVersion + " " + testPlatformInfo; final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); assertEquals(header, expectedHeader); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( testAppId, testSdkName, testPackageVersion, Configuration.NONE, () -> "1.0")) .build(); Mono<HttpResponse> response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: StepVerifier.create(response) .expectNextMatches(httpResponse -> { assertEquals(200, httpResponse.getStatusCode()); assertEquals(expectedHeader, httpResponse.getRequest().getHeaders().getValue("User-Agent")); return true; }) .verifyComplete(); }
class UserAgentTests { @Test public void defaultUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { Assertions.assertEquals( request.getHeaders().getValue("User-Agent"), "AutoRest-Java"); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("AutoRest-Java")) .build(); HttpResponse response = pipeline.send(new HttpRequest( HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); } @Test public void customUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); Assertions.assertEquals("Awesome", header); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("Awesome")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); } @Test public void defaultApplicationIdUserAgentTest() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); String expectedHeaderPrefix = "azsdk-java-package.name"; Assertions.assertTrue(header.startsWith(expectedHeaderPrefix)); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( null, "package.name", "package_version", Configuration.NONE, () -> "1.0")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); } @Test }
class UserAgentTests { @Test public void defaultUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { assertEquals( request.getHeaders().getValue("User-Agent"), "AutoRest-Java"); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("AutoRest-Java")) .build(); HttpResponse response = pipeline.send(new HttpRequest( HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); } @Test public void customUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); assertEquals("Awesome", header); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("Awesome")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); } @Test public void defaultApplicationIdUserAgentTest() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); String expectedHeaderPrefix = "azsdk-java-package.name"; Assertions.assertTrue(header.startsWith(expectedHeaderPrefix)); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( null, "package.name", "package_version", Configuration.NONE, () -> "1.0")) .build(); Mono<HttpResponse> response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: StepVerifier.create(response) .expectNextMatches(httpResponse -> { assertEquals(200, httpResponse.getStatusCode()); 
assertTrue(httpResponse.getRequest().getHeaders().getValue("User-Agent").startsWith("azsdk")); return true; }) .verifyComplete(); } @Test }
Use StepVerifier rather than .block()
public void defaultApplicationIdUserAgentTest() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); String expectedHeaderPrefix = "azsdk-java-package.name"; Assertions.assertTrue(header.startsWith(expectedHeaderPrefix)); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( null, "package.name", "package_version", Configuration.NONE, () -> "1.0")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); }
HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET,
public void defaultApplicationIdUserAgentTest() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); String expectedHeaderPrefix = "azsdk-java-package.name"; Assertions.assertTrue(header.startsWith(expectedHeaderPrefix)); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( null, "package.name", "package_version", Configuration.NONE, () -> "1.0")) .build(); Mono<HttpResponse> response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: StepVerifier.create(response) .expectNextMatches(httpResponse -> { assertEquals(200, httpResponse.getStatusCode()); assertTrue(httpResponse.getRequest().getHeaders().getValue("User-Agent").startsWith("azsdk")); return true; }) .verifyComplete(); }
class UserAgentTests { @Test public void defaultUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { Assertions.assertEquals( request.getHeaders().getValue("User-Agent"), "AutoRest-Java"); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("AutoRest-Java")) .build(); HttpResponse response = pipeline.send(new HttpRequest( HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); } @Test public void customUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); Assertions.assertEquals("Awesome", header); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("Awesome")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); } @Test @Test public void customApplicationIdUserAgentTest() throws Exception { final String testSdkName = "sdk.name"; final String testAppId = "user_specified_appId"; final String testPackageVersion = "package_version"; String javaVersion = Configuration.getGlobalConfiguration().get("java.version"); String osName = Configuration.getGlobalConfiguration().get("os.name"); String osVersion = Configuration.getGlobalConfiguration().get("os.version"); String testPlatformInfo = javaVersion + "; " + osName + " " + osVersion; String expectedHeader = testAppId + " " + "azsdk-java-" + testSdkName + "/" + testPackageVersion + " " + testPlatformInfo; final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = 
request.getHeaders().getValue("User-Agent"); Assertions.assertEquals(header, expectedHeader); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( testAppId, testSdkName, testPackageVersion, Configuration.NONE, () -> "1.0")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); } }
class UserAgentTests { @Test public void defaultUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { assertEquals( request.getHeaders().getValue("User-Agent"), "AutoRest-Java"); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("AutoRest-Java")) .build(); HttpResponse response = pipeline.send(new HttpRequest( HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); } @Test public void customUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); assertEquals("Awesome", header); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("Awesome")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); } @Test @Test public void customApplicationIdUserAgentTest() throws Exception { final String testSdkName = "sdk.name"; final String testAppId = "user_specified_appId"; final String testPackageVersion = "package_version"; String javaVersion = Configuration.getGlobalConfiguration().get("java.version"); String osName = Configuration.getGlobalConfiguration().get("os.name"); String osVersion = Configuration.getGlobalConfiguration().get("os.version"); String testPlatformInfo = javaVersion + "; " + osName + " " + osVersion; String expectedHeader = testAppId + " " + "azsdk-java-" + testSdkName + "/" + testPackageVersion + " " + testPlatformInfo; final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); assertEquals(header, 
expectedHeader); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( testAppId, testSdkName, testPackageVersion, Configuration.NONE, () -> "1.0")) .build(); Mono<HttpResponse> response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: StepVerifier.create(response) .expectNextMatches(httpResponse -> { assertEquals(200, httpResponse.getStatusCode()); assertEquals(expectedHeader, httpResponse.getRequest().getHeaders().getValue("User-Agent")); return true; }) .verifyComplete(); } }
Use step verifier.
public void customApplicationIdUserAgentTest() throws Exception { final String testSdkName = "sdk.name"; final String testAppId = "user_specified_appId"; final String testPackageVersion = "package_version"; String javaVersion = Configuration.getGlobalConfiguration().get("java.version"); String osName = Configuration.getGlobalConfiguration().get("os.name"); String osVersion = Configuration.getGlobalConfiguration().get("os.version"); String testPlatformInfo = javaVersion + "; " + osName + " " + osVersion; String expectedHeader = testAppId + " " + "azsdk-java-" + testSdkName + "/" + testPackageVersion + " " + testPlatformInfo; final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); Assertions.assertEquals(header, expectedHeader); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( testAppId, testSdkName, testPackageVersion, Configuration.NONE, () -> "1.0")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); }
HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET,
public void customApplicationIdUserAgentTest() throws Exception { final String testSdkName = "sdk.name"; final String testAppId = "user_specified_appId"; final String testPackageVersion = "package_version"; String javaVersion = Configuration.getGlobalConfiguration().get("java.version"); String osName = Configuration.getGlobalConfiguration().get("os.name"); String osVersion = Configuration.getGlobalConfiguration().get("os.version"); String testPlatformInfo = javaVersion + "; " + osName + " " + osVersion; String expectedHeader = testAppId + " " + "azsdk-java-" + testSdkName + "/" + testPackageVersion + " " + testPlatformInfo; final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); assertEquals(header, expectedHeader); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( testAppId, testSdkName, testPackageVersion, Configuration.NONE, () -> "1.0")) .build(); Mono<HttpResponse> response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: StepVerifier.create(response) .expectNextMatches(httpResponse -> { assertEquals(200, httpResponse.getStatusCode()); assertEquals(expectedHeader, httpResponse.getRequest().getHeaders().getValue("User-Agent")); return true; }) .verifyComplete(); }
class UserAgentTests { @Test public void defaultUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { Assertions.assertEquals( request.getHeaders().getValue("User-Agent"), "AutoRest-Java"); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("AutoRest-Java")) .build(); HttpResponse response = pipeline.send(new HttpRequest( HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); } @Test public void customUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); Assertions.assertEquals("Awesome", header); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("Awesome")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); } @Test public void defaultApplicationIdUserAgentTest() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); String expectedHeaderPrefix = "azsdk-java-package.name"; Assertions.assertTrue(header.startsWith(expectedHeaderPrefix)); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( null, "package.name", "package_version", Configuration.NONE, () -> "1.0")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); } @Test }
class UserAgentTests { @Test public void defaultUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { assertEquals( request.getHeaders().getValue("User-Agent"), "AutoRest-Java"); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("AutoRest-Java")) .build(); HttpResponse response = pipeline.send(new HttpRequest( HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); } @Test public void customUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); assertEquals("Awesome", header); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("Awesome")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); } @Test public void defaultApplicationIdUserAgentTest() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); String expectedHeaderPrefix = "azsdk-java-package.name"; Assertions.assertTrue(header.startsWith(expectedHeaderPrefix)); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( null, "package.name", "package_version", Configuration.NONE, () -> "1.0")) .build(); Mono<HttpResponse> response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: StepVerifier.create(response) .expectNextMatches(httpResponse -> { assertEquals(200, httpResponse.getStatusCode()); 
assertTrue(httpResponse.getRequest().getHeaders().getValue("User-Agent").startsWith("azsdk")); return true; }) .verifyComplete(); } @Test }
I would also assert that the `response` contains the headers you expect to exist?
public void defaultApplicationIdUserAgentTest() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); String expectedHeaderPrefix = "azsdk-java-package.name"; Assertions.assertTrue(header.startsWith(expectedHeaderPrefix)); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( null, "package.name", "package_version", Configuration.NONE, () -> "1.0")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); }
Assertions.assertEquals(200, response.getStatusCode());
public void defaultApplicationIdUserAgentTest() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); String expectedHeaderPrefix = "azsdk-java-package.name"; Assertions.assertTrue(header.startsWith(expectedHeaderPrefix)); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( null, "package.name", "package_version", Configuration.NONE, () -> "1.0")) .build(); Mono<HttpResponse> response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: StepVerifier.create(response) .expectNextMatches(httpResponse -> { assertEquals(200, httpResponse.getStatusCode()); assertTrue(httpResponse.getRequest().getHeaders().getValue("User-Agent").startsWith("azsdk")); return true; }) .verifyComplete(); }
class UserAgentTests { @Test public void defaultUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { Assertions.assertEquals( request.getHeaders().getValue("User-Agent"), "AutoRest-Java"); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("AutoRest-Java")) .build(); HttpResponse response = pipeline.send(new HttpRequest( HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); } @Test public void customUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); Assertions.assertEquals("Awesome", header); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("Awesome")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); } @Test @Test public void customApplicationIdUserAgentTest() throws Exception { final String testSdkName = "sdk.name"; final String testAppId = "user_specified_appId"; final String testPackageVersion = "package_version"; String javaVersion = Configuration.getGlobalConfiguration().get("java.version"); String osName = Configuration.getGlobalConfiguration().get("os.name"); String osVersion = Configuration.getGlobalConfiguration().get("os.version"); String testPlatformInfo = javaVersion + "; " + osName + " " + osVersion; String expectedHeader = testAppId + " " + "azsdk-java-" + testSdkName + "/" + testPackageVersion + " " + testPlatformInfo; final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = 
request.getHeaders().getValue("User-Agent"); Assertions.assertEquals(header, expectedHeader); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( testAppId, testSdkName, testPackageVersion, Configuration.NONE, () -> "1.0")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: Assertions.assertEquals(200, response.getStatusCode()); } }
class UserAgentTests { @Test public void defaultUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { assertEquals( request.getHeaders().getValue("User-Agent"), "AutoRest-Java"); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("AutoRest-Java")) .build(); HttpResponse response = pipeline.send(new HttpRequest( HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); } @Test public void customUserAgentTests() throws Exception { final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); assertEquals("Awesome", header); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy("Awesome")) .build(); HttpResponse response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); } @Test @Test public void customApplicationIdUserAgentTest() throws Exception { final String testSdkName = "sdk.name"; final String testAppId = "user_specified_appId"; final String testPackageVersion = "package_version"; String javaVersion = Configuration.getGlobalConfiguration().get("java.version"); String osName = Configuration.getGlobalConfiguration().get("os.name"); String osVersion = Configuration.getGlobalConfiguration().get("os.version"); String testPlatformInfo = javaVersion + "; " + osName + " " + osVersion; String expectedHeader = testAppId + " " + "azsdk-java-" + testSdkName + "/" + testPackageVersion + " " + testPlatformInfo; final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { String header = request.getHeaders().getValue("User-Agent"); assertEquals(header, 
expectedHeader); return Mono.just(new MockHttpResponse(request, 200)); } }) .policies(new UserAgentPolicy( testAppId, testSdkName, testPackageVersion, Configuration.NONE, () -> "1.0")) .build(); Mono<HttpResponse> response = pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: StepVerifier.create(response) .expectNextMatches(httpResponse -> { assertEquals(200, httpResponse.getStatusCode()); assertEquals(expectedHeader, httpResponse.getRequest().getHeaders().getValue("User-Agent")); return true; }) .verifyComplete(); } }
Only this line should be in the `assertThrows(() -> {})`, to scope down where we expect the error to be.
public void throwsWhenIncorrectTypeInResponse() { assertThrows(AzureException.class, () -> { final Long eventHubName = 100L; final Date createdAtAsDate = new Date(1569275540L); final String[] partitionIds = new String[]{ "1", "foo", "bar", "baz" }; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, partitionIds); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); serializer.deserialize(message, EventHubProperties.class); }); }
serializer.deserialize(message, EventHubProperties.class);
public void throwsWhenIncorrectTypeInResponse() { final Long eventHubName = 100L; final Date createdAtAsDate = new Date(1569275540L); final String[] partitionIds = new String[]{"1", "foo", "bar", "baz"}; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, partitionIds); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); assertThrows(AzureException.class, () -> { serializer.deserialize(message, EventHubProperties.class); }); }
class EventHubMessageSerializerTest { private final EventHubMessageSerializer serializer = new EventHubMessageSerializer(); @Test public void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, EventData.class)); } @Test public void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test public void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type EventData. */ @Test public void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test public void cannotDeserializeObject() { final Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, EventHubAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link EventData}. 
*/ @Test public void deserializeEventData() { final String[] systemPropertyNames = new String[]{ PARTITION_KEY_ANNOTATION_NAME.getValue(), OFFSET_ANNOTATION_NAME.getValue(), ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), }; final Message message = getMessage("hello-world".getBytes(UTF_8)); final EventData eventData = serializer.deserialize(message, EventData.class); Assertions.assertEquals(ENQUEUED_TIME, eventData.getEnqueuedTime()); Assertions.assertEquals(OFFSET, eventData.getOffset()); Assertions.assertEquals(PARTITION_KEY, eventData.getPartitionKey()); Assertions.assertEquals(SEQUENCE_NUMBER, eventData.getSequenceNumber()); Assertions.assertTrue(eventData.getSystemProperties().containsKey(OTHER_SYSTEM_PROPERTY)); final Object otherPropertyValue = eventData.getSystemProperties().get(OTHER_SYSTEM_PROPERTY); Assertions.assertTrue(otherPropertyValue instanceof Boolean); Assertions.assertTrue((Boolean) otherPropertyValue); Assertions.assertEquals(APPLICATION_PROPERTIES.size(), eventData.getProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(eventData.getProperties().containsKey(key)); Assertions.assertEquals(value, eventData.getProperties().get(key)); }); for (String property : systemPropertyNames) { Assertions.assertFalse(eventData.getSystemProperties().containsKey(property), property + " should not be in system properties map."); } } /** * Verify we can deserialize a message to {@link PartitionProperties}. 
*/ @Test public void deserializePartitionProperties() { final String eventHubName = "my-event-hub"; final String id = "partition-id-test"; final long beginningSequenceNumber = 1343L; final long lastEnqueuedSequenceNumber = 1500L; final String lastEnqueuedOffset = "102"; final Date lastEnqueuedTimeAsDate = new Date(1569275540L); final Instant lastEnqueuedTime = lastEnqueuedTimeAsDate.toInstant(); final boolean isEmpty = true; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY, id); values.put(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER, beginningSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER, lastEnqueuedSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET, lastEnqueuedOffset); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC, lastEnqueuedTimeAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY, isEmpty); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final PartitionProperties partitionProperties = serializer.deserialize(message, PartitionProperties.class); Assertions.assertNotNull(partitionProperties); Assertions.assertEquals(eventHubName, partitionProperties.getEventHubName()); Assertions.assertEquals(id, partitionProperties.getId()); Assertions.assertEquals(beginningSequenceNumber, partitionProperties.getBeginningSequenceNumber()); Assertions.assertEquals(lastEnqueuedSequenceNumber, partitionProperties.getLastEnqueuedSequenceNumber()); Assertions.assertEquals(lastEnqueuedOffset, partitionProperties.getLastEnqueuedOffset()); Assertions.assertEquals(lastEnqueuedTime, partitionProperties.getLastEnqueuedTime()); Assertions.assertEquals(isEmpty, partitionProperties.isEmpty()); } /** * Verify we can deserialize a message to {@link 
EventHubProperties}. */ @Test public void deserializeEventHubProperties() { final String eventHubName = "my-event-hub"; final Date createdAtAsDate = new Date(1569275540L); final Instant createdAt = createdAtAsDate.toInstant(); final String[] partitionIds = new String[]{ "1", "foo", "bar", "baz" }; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, partitionIds); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final EventHubProperties properties = serializer.deserialize(message, EventHubProperties.class); Assertions.assertNotNull(properties); Assertions.assertEquals(eventHubName, properties.getName()); Assertions.assertEquals(createdAt, properties.getCreatedAt()); Assertions.assertArrayEquals(partitionIds, properties.getPartitionIds()); } /** * Verify that it throws if the value is not what we expect. In this case, eventHubName is not a string. */ @Test /** * Verify that it throws if the value in the map is null. */ @Test public void throwsWhenNullValueInResponse() { assertThrows(AzureException.class, () -> { final String eventHubName = "event-hub-name-test"; final Date createdAtAsDate = new Date(1569275540L); final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, null); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); serializer.deserialize(message, EventHubProperties.class); }); } }
class EventHubMessageSerializerTest { private final EventHubMessageSerializer serializer = new EventHubMessageSerializer(); @Test public void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, EventData.class)); } @Test public void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test public void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type EventData. */ @Test public void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test public void cannotDeserializeObject() { final Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, EventHubAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link EventData}. 
*/ @Test public void deserializeEventData() { final String[] systemPropertyNames = new String[]{ PARTITION_KEY_ANNOTATION_NAME.getValue(), OFFSET_ANNOTATION_NAME.getValue(), ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), }; final Message message = getMessage("hello-world".getBytes(UTF_8)); final EventData eventData = serializer.deserialize(message, EventData.class); Assertions.assertEquals(ENQUEUED_TIME, eventData.getEnqueuedTime()); Assertions.assertEquals(OFFSET, eventData.getOffset()); Assertions.assertEquals(PARTITION_KEY, eventData.getPartitionKey()); Assertions.assertEquals(SEQUENCE_NUMBER, eventData.getSequenceNumber()); Assertions.assertTrue(eventData.getSystemProperties().containsKey(OTHER_SYSTEM_PROPERTY)); final Object otherPropertyValue = eventData.getSystemProperties().get(OTHER_SYSTEM_PROPERTY); Assertions.assertTrue(otherPropertyValue instanceof Boolean); Assertions.assertTrue((Boolean) otherPropertyValue); Assertions.assertEquals(APPLICATION_PROPERTIES.size(), eventData.getProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(eventData.getProperties().containsKey(key)); Assertions.assertEquals(value, eventData.getProperties().get(key)); }); for (String property : systemPropertyNames) { Assertions.assertFalse(eventData.getSystemProperties().containsKey(property), property + " should not be in system properties map."); } } /** * Verify we can deserialize a message to {@link PartitionProperties}. 
*/ @Test public void deserializePartitionProperties() { final String eventHubName = "my-event-hub"; final String id = "partition-id-test"; final long beginningSequenceNumber = 1343L; final long lastEnqueuedSequenceNumber = 1500L; final String lastEnqueuedOffset = "102"; final Date lastEnqueuedTimeAsDate = new Date(1569275540L); final Instant lastEnqueuedTime = lastEnqueuedTimeAsDate.toInstant(); final boolean isEmpty = true; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY, id); values.put(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER, beginningSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER, lastEnqueuedSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET, lastEnqueuedOffset); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC, lastEnqueuedTimeAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY, isEmpty); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final PartitionProperties partitionProperties = serializer.deserialize(message, PartitionProperties.class); Assertions.assertNotNull(partitionProperties); Assertions.assertEquals(eventHubName, partitionProperties.getEventHubName()); Assertions.assertEquals(id, partitionProperties.getId()); Assertions.assertEquals(beginningSequenceNumber, partitionProperties.getBeginningSequenceNumber()); Assertions.assertEquals(lastEnqueuedSequenceNumber, partitionProperties.getLastEnqueuedSequenceNumber()); Assertions.assertEquals(lastEnqueuedOffset, partitionProperties.getLastEnqueuedOffset()); Assertions.assertEquals(lastEnqueuedTime, partitionProperties.getLastEnqueuedTime()); Assertions.assertEquals(isEmpty, partitionProperties.isEmpty()); } /** * Verify we can deserialize a message to {@link 
EventHubProperties}. */ @Test public void deserializeEventHubProperties() { final String eventHubName = "my-event-hub"; final Date createdAtAsDate = new Date(1569275540L); final Instant createdAt = createdAtAsDate.toInstant(); final String[] partitionIds = new String[]{ "1", "foo", "bar", "baz" }; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, partitionIds); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final EventHubProperties properties = serializer.deserialize(message, EventHubProperties.class); Assertions.assertNotNull(properties); Assertions.assertEquals(eventHubName, properties.getName()); Assertions.assertEquals(createdAt, properties.getCreatedAt()); Assertions.assertArrayEquals(partitionIds, properties.getPartitionIds()); } /** * Verify that it throws if the value is not what we expect. In this case, eventHubName is not a string. */ @Test /** * Verify that it throws if the value in the map is null. */ @Test public void throwsWhenNullValueInResponse() { final String eventHubName = "event-hub-name-test"; final Date createdAtAsDate = new Date(1569275540L); final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, null); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); assertThrows(AzureException.class, () -> { serializer.deserialize(message, EventHubProperties.class); }); } }
Only this line should be in the `assertThrows(() -> {})`, to scope down where we expect the error to be.
/**
 * Verifies that deserializing a management response whose partition-ids entry is {@code null}
 * throws an {@code AzureException}.
 *
 * <p>All arrange steps run outside {@code assertThrows} so that only the {@code deserialize}
 * call is in scope for the expected failure — a setup exception must fail the test, not
 * accidentally satisfy it.</p>
 */
public void throwsWhenNullValueInResponse() {
    final String eventHubName = "event-hub-name-test";
    final Date createdAtAsDate = new Date(1569275540L);

    final Map<String, Object> values = new HashMap<>();
    values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName);
    values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate);
    // Deliberately null: the serializer must reject a missing partition-ids value.
    values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, null);

    final AmqpValue amqpValue = new AmqpValue(values);
    final Message message = Proton.message();
    message.setBody(amqpValue);

    // Act & Assert: only the deserialization is expected to throw.
    assertThrows(AzureException.class, () -> {
        serializer.deserialize(message, EventHubProperties.class);
    });
}
serializer.deserialize(message, EventHubProperties.class);
/**
 * A management response carrying a {@code null} partition-ids value must make
 * deserialization fail with an {@code AzureException}. Setup stays outside the
 * assertion so only the deserialize call can satisfy it.
 */
public void throwsWhenNullValueInResponse() {
    final String hubName = "event-hub-name-test";
    final Date creationDate = new Date(1569275540L);

    final Map<String, Object> responseBody = new HashMap<>();
    responseBody.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, hubName);
    responseBody.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, creationDate);
    responseBody.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, null);

    final Message response = Proton.message();
    response.setBody(new AmqpValue(responseBody));

    assertThrows(AzureException.class,
        () -> serializer.deserialize(response, EventHubProperties.class));
}
class EventHubMessageSerializerTest { private final EventHubMessageSerializer serializer = new EventHubMessageSerializer(); @Test public void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, EventData.class)); } @Test public void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test public void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type EventData. */ @Test public void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test public void cannotDeserializeObject() { final Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, EventHubAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link EventData}. 
*/ @Test public void deserializeEventData() { final String[] systemPropertyNames = new String[]{ PARTITION_KEY_ANNOTATION_NAME.getValue(), OFFSET_ANNOTATION_NAME.getValue(), ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), }; final Message message = getMessage("hello-world".getBytes(UTF_8)); final EventData eventData = serializer.deserialize(message, EventData.class); Assertions.assertEquals(ENQUEUED_TIME, eventData.getEnqueuedTime()); Assertions.assertEquals(OFFSET, eventData.getOffset()); Assertions.assertEquals(PARTITION_KEY, eventData.getPartitionKey()); Assertions.assertEquals(SEQUENCE_NUMBER, eventData.getSequenceNumber()); Assertions.assertTrue(eventData.getSystemProperties().containsKey(OTHER_SYSTEM_PROPERTY)); final Object otherPropertyValue = eventData.getSystemProperties().get(OTHER_SYSTEM_PROPERTY); Assertions.assertTrue(otherPropertyValue instanceof Boolean); Assertions.assertTrue((Boolean) otherPropertyValue); Assertions.assertEquals(APPLICATION_PROPERTIES.size(), eventData.getProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(eventData.getProperties().containsKey(key)); Assertions.assertEquals(value, eventData.getProperties().get(key)); }); for (String property : systemPropertyNames) { Assertions.assertFalse(eventData.getSystemProperties().containsKey(property), property + " should not be in system properties map."); } } /** * Verify we can deserialize a message to {@link PartitionProperties}. 
*/ @Test public void deserializePartitionProperties() { final String eventHubName = "my-event-hub"; final String id = "partition-id-test"; final long beginningSequenceNumber = 1343L; final long lastEnqueuedSequenceNumber = 1500L; final String lastEnqueuedOffset = "102"; final Date lastEnqueuedTimeAsDate = new Date(1569275540L); final Instant lastEnqueuedTime = lastEnqueuedTimeAsDate.toInstant(); final boolean isEmpty = true; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY, id); values.put(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER, beginningSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER, lastEnqueuedSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET, lastEnqueuedOffset); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC, lastEnqueuedTimeAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY, isEmpty); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final PartitionProperties partitionProperties = serializer.deserialize(message, PartitionProperties.class); Assertions.assertNotNull(partitionProperties); Assertions.assertEquals(eventHubName, partitionProperties.getEventHubName()); Assertions.assertEquals(id, partitionProperties.getId()); Assertions.assertEquals(beginningSequenceNumber, partitionProperties.getBeginningSequenceNumber()); Assertions.assertEquals(lastEnqueuedSequenceNumber, partitionProperties.getLastEnqueuedSequenceNumber()); Assertions.assertEquals(lastEnqueuedOffset, partitionProperties.getLastEnqueuedOffset()); Assertions.assertEquals(lastEnqueuedTime, partitionProperties.getLastEnqueuedTime()); Assertions.assertEquals(isEmpty, partitionProperties.isEmpty()); } /** * Verify we can deserialize a message to {@link 
EventHubProperties}. */ @Test public void deserializeEventHubProperties() { final String eventHubName = "my-event-hub"; final Date createdAtAsDate = new Date(1569275540L); final Instant createdAt = createdAtAsDate.toInstant(); final String[] partitionIds = new String[]{ "1", "foo", "bar", "baz" }; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, partitionIds); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final EventHubProperties properties = serializer.deserialize(message, EventHubProperties.class); Assertions.assertNotNull(properties); Assertions.assertEquals(eventHubName, properties.getName()); Assertions.assertEquals(createdAt, properties.getCreatedAt()); Assertions.assertArrayEquals(partitionIds, properties.getPartitionIds()); } /** * Verify that it throws if the value is not what we expect. In this case, eventHubName is not a string. */ @Test public void throwsWhenIncorrectTypeInResponse() { assertThrows(AzureException.class, () -> { final Long eventHubName = 100L; final Date createdAtAsDate = new Date(1569275540L); final String[] partitionIds = new String[]{ "1", "foo", "bar", "baz" }; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, partitionIds); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); serializer.deserialize(message, EventHubProperties.class); }); } /** * Verify that it throws if the value in the map is null. */ @Test }
class EventHubMessageSerializerTest { private final EventHubMessageSerializer serializer = new EventHubMessageSerializer(); @Test public void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, EventData.class)); } @Test public void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test public void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type EventData. */ @Test public void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test public void cannotDeserializeObject() { final Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, EventHubAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link EventData}. 
*/ @Test public void deserializeEventData() { final String[] systemPropertyNames = new String[]{ PARTITION_KEY_ANNOTATION_NAME.getValue(), OFFSET_ANNOTATION_NAME.getValue(), ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), }; final Message message = getMessage("hello-world".getBytes(UTF_8)); final EventData eventData = serializer.deserialize(message, EventData.class); Assertions.assertEquals(ENQUEUED_TIME, eventData.getEnqueuedTime()); Assertions.assertEquals(OFFSET, eventData.getOffset()); Assertions.assertEquals(PARTITION_KEY, eventData.getPartitionKey()); Assertions.assertEquals(SEQUENCE_NUMBER, eventData.getSequenceNumber()); Assertions.assertTrue(eventData.getSystemProperties().containsKey(OTHER_SYSTEM_PROPERTY)); final Object otherPropertyValue = eventData.getSystemProperties().get(OTHER_SYSTEM_PROPERTY); Assertions.assertTrue(otherPropertyValue instanceof Boolean); Assertions.assertTrue((Boolean) otherPropertyValue); Assertions.assertEquals(APPLICATION_PROPERTIES.size(), eventData.getProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(eventData.getProperties().containsKey(key)); Assertions.assertEquals(value, eventData.getProperties().get(key)); }); for (String property : systemPropertyNames) { Assertions.assertFalse(eventData.getSystemProperties().containsKey(property), property + " should not be in system properties map."); } } /** * Verify we can deserialize a message to {@link PartitionProperties}. 
*/ @Test public void deserializePartitionProperties() { final String eventHubName = "my-event-hub"; final String id = "partition-id-test"; final long beginningSequenceNumber = 1343L; final long lastEnqueuedSequenceNumber = 1500L; final String lastEnqueuedOffset = "102"; final Date lastEnqueuedTimeAsDate = new Date(1569275540L); final Instant lastEnqueuedTime = lastEnqueuedTimeAsDate.toInstant(); final boolean isEmpty = true; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY, id); values.put(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER, beginningSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER, lastEnqueuedSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET, lastEnqueuedOffset); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC, lastEnqueuedTimeAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY, isEmpty); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final PartitionProperties partitionProperties = serializer.deserialize(message, PartitionProperties.class); Assertions.assertNotNull(partitionProperties); Assertions.assertEquals(eventHubName, partitionProperties.getEventHubName()); Assertions.assertEquals(id, partitionProperties.getId()); Assertions.assertEquals(beginningSequenceNumber, partitionProperties.getBeginningSequenceNumber()); Assertions.assertEquals(lastEnqueuedSequenceNumber, partitionProperties.getLastEnqueuedSequenceNumber()); Assertions.assertEquals(lastEnqueuedOffset, partitionProperties.getLastEnqueuedOffset()); Assertions.assertEquals(lastEnqueuedTime, partitionProperties.getLastEnqueuedTime()); Assertions.assertEquals(isEmpty, partitionProperties.isEmpty()); } /** * Verify we can deserialize a message to {@link 
EventHubProperties}. */ @Test public void deserializeEventHubProperties() { final String eventHubName = "my-event-hub"; final Date createdAtAsDate = new Date(1569275540L); final Instant createdAt = createdAtAsDate.toInstant(); final String[] partitionIds = new String[]{ "1", "foo", "bar", "baz" }; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, partitionIds); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final EventHubProperties properties = serializer.deserialize(message, EventHubProperties.class); Assertions.assertNotNull(properties); Assertions.assertEquals(eventHubName, properties.getName()); Assertions.assertEquals(createdAt, properties.getCreatedAt()); Assertions.assertArrayEquals(partitionIds, properties.getPartitionIds()); } /** * Verify that it throws if the value is not what we expect. In this case, eventHubName is not a string. */ @Test public void throwsWhenIncorrectTypeInResponse() { final Long eventHubName = 100L; final Date createdAtAsDate = new Date(1569275540L); final String[] partitionIds = new String[]{"1", "foo", "bar", "baz"}; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, partitionIds); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); assertThrows(AzureException.class, () -> { serializer.deserialize(message, EventHubProperties.class); }); } /** * Verify that it throws if the value in the map is null. */ @Test }
Thanks for the review. The changes have been incorporated.
/**
 * Verifies that deserializing a management response whose event hub name has the wrong type
 * (a {@code Long} where a {@code String} is expected) throws an {@code AzureException}.
 *
 * <p>All arrange steps run outside {@code assertThrows} so that only the {@code deserialize}
 * call is in scope for the expected failure — a setup exception must fail the test, not
 * accidentally satisfy it.</p>
 */
public void throwsWhenIncorrectTypeInResponse() {
    // Arrange: eventHubName is deliberately a Long; the serializer expects a String here.
    final Long eventHubName = 100L;
    final Date createdAtAsDate = new Date(1569275540L);
    final String[] partitionIds = new String[]{"1", "foo", "bar", "baz"};

    final Map<String, Object> values = new HashMap<>();
    values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName);
    values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate);
    values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, partitionIds);

    final AmqpValue amqpValue = new AmqpValue(values);
    final Message message = Proton.message();
    message.setBody(amqpValue);

    // Act & Assert: only the deserialization is expected to throw.
    assertThrows(AzureException.class, () -> {
        serializer.deserialize(message, EventHubProperties.class);
    });
}
serializer.deserialize(message, EventHubProperties.class);
/**
 * The management response reports the event hub name as a {@code Long} rather than the
 * expected {@code String}; deserializing that payload must surface an {@code AzureException}.
 * Setup is kept outside the assertion so only the deserialize call can satisfy it.
 */
public void throwsWhenIncorrectTypeInResponse() {
    final Long badEventHubName = 100L;
    final Date creationDate = new Date(1569275540L);
    final String[] ids = new String[]{"1", "foo", "bar", "baz"};

    final Map<String, Object> responseBody = new HashMap<>();
    responseBody.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, badEventHubName);
    responseBody.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, creationDate);
    responseBody.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, ids);

    final Message response = Proton.message();
    response.setBody(new AmqpValue(responseBody));

    assertThrows(AzureException.class,
        () -> serializer.deserialize(response, EventHubProperties.class));
}
class EventHubMessageSerializerTest { private final EventHubMessageSerializer serializer = new EventHubMessageSerializer(); @Test public void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, EventData.class)); } @Test public void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test public void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type EventData. */ @Test public void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test public void cannotDeserializeObject() { final Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, EventHubAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link EventData}. 
*/ @Test public void deserializeEventData() { final String[] systemPropertyNames = new String[]{ PARTITION_KEY_ANNOTATION_NAME.getValue(), OFFSET_ANNOTATION_NAME.getValue(), ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), }; final Message message = getMessage("hello-world".getBytes(UTF_8)); final EventData eventData = serializer.deserialize(message, EventData.class); Assertions.assertEquals(ENQUEUED_TIME, eventData.getEnqueuedTime()); Assertions.assertEquals(OFFSET, eventData.getOffset()); Assertions.assertEquals(PARTITION_KEY, eventData.getPartitionKey()); Assertions.assertEquals(SEQUENCE_NUMBER, eventData.getSequenceNumber()); Assertions.assertTrue(eventData.getSystemProperties().containsKey(OTHER_SYSTEM_PROPERTY)); final Object otherPropertyValue = eventData.getSystemProperties().get(OTHER_SYSTEM_PROPERTY); Assertions.assertTrue(otherPropertyValue instanceof Boolean); Assertions.assertTrue((Boolean) otherPropertyValue); Assertions.assertEquals(APPLICATION_PROPERTIES.size(), eventData.getProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(eventData.getProperties().containsKey(key)); Assertions.assertEquals(value, eventData.getProperties().get(key)); }); for (String property : systemPropertyNames) { Assertions.assertFalse(eventData.getSystemProperties().containsKey(property), property + " should not be in system properties map."); } } /** * Verify we can deserialize a message to {@link PartitionProperties}. 
*/ @Test public void deserializePartitionProperties() { final String eventHubName = "my-event-hub"; final String id = "partition-id-test"; final long beginningSequenceNumber = 1343L; final long lastEnqueuedSequenceNumber = 1500L; final String lastEnqueuedOffset = "102"; final Date lastEnqueuedTimeAsDate = new Date(1569275540L); final Instant lastEnqueuedTime = lastEnqueuedTimeAsDate.toInstant(); final boolean isEmpty = true; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY, id); values.put(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER, beginningSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER, lastEnqueuedSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET, lastEnqueuedOffset); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC, lastEnqueuedTimeAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY, isEmpty); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final PartitionProperties partitionProperties = serializer.deserialize(message, PartitionProperties.class); Assertions.assertNotNull(partitionProperties); Assertions.assertEquals(eventHubName, partitionProperties.getEventHubName()); Assertions.assertEquals(id, partitionProperties.getId()); Assertions.assertEquals(beginningSequenceNumber, partitionProperties.getBeginningSequenceNumber()); Assertions.assertEquals(lastEnqueuedSequenceNumber, partitionProperties.getLastEnqueuedSequenceNumber()); Assertions.assertEquals(lastEnqueuedOffset, partitionProperties.getLastEnqueuedOffset()); Assertions.assertEquals(lastEnqueuedTime, partitionProperties.getLastEnqueuedTime()); Assertions.assertEquals(isEmpty, partitionProperties.isEmpty()); } /** * Verify we can deserialize a message to {@link 
EventHubProperties}. */ @Test public void deserializeEventHubProperties() { final String eventHubName = "my-event-hub"; final Date createdAtAsDate = new Date(1569275540L); final Instant createdAt = createdAtAsDate.toInstant(); final String[] partitionIds = new String[]{ "1", "foo", "bar", "baz" }; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, partitionIds); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final EventHubProperties properties = serializer.deserialize(message, EventHubProperties.class); Assertions.assertNotNull(properties); Assertions.assertEquals(eventHubName, properties.getName()); Assertions.assertEquals(createdAt, properties.getCreatedAt()); Assertions.assertArrayEquals(partitionIds, properties.getPartitionIds()); } /** * Verify that it throws if the value is not what we expect. In this case, eventHubName is not a string. */ @Test /** * Verify that it throws if the value in the map is null. */ @Test public void throwsWhenNullValueInResponse() { assertThrows(AzureException.class, () -> { final String eventHubName = "event-hub-name-test"; final Date createdAtAsDate = new Date(1569275540L); final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, null); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); serializer.deserialize(message, EventHubProperties.class); }); } }
class EventHubMessageSerializerTest { private final EventHubMessageSerializer serializer = new EventHubMessageSerializer(); @Test public void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, EventData.class)); } @Test public void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test public void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type EventData. */ @Test public void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test public void cannotDeserializeObject() { final Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, EventHubAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link EventData}. 
*/ @Test public void deserializeEventData() { final String[] systemPropertyNames = new String[]{ PARTITION_KEY_ANNOTATION_NAME.getValue(), OFFSET_ANNOTATION_NAME.getValue(), ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), }; final Message message = getMessage("hello-world".getBytes(UTF_8)); final EventData eventData = serializer.deserialize(message, EventData.class); Assertions.assertEquals(ENQUEUED_TIME, eventData.getEnqueuedTime()); Assertions.assertEquals(OFFSET, eventData.getOffset()); Assertions.assertEquals(PARTITION_KEY, eventData.getPartitionKey()); Assertions.assertEquals(SEQUENCE_NUMBER, eventData.getSequenceNumber()); Assertions.assertTrue(eventData.getSystemProperties().containsKey(OTHER_SYSTEM_PROPERTY)); final Object otherPropertyValue = eventData.getSystemProperties().get(OTHER_SYSTEM_PROPERTY); Assertions.assertTrue(otherPropertyValue instanceof Boolean); Assertions.assertTrue((Boolean) otherPropertyValue); Assertions.assertEquals(APPLICATION_PROPERTIES.size(), eventData.getProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(eventData.getProperties().containsKey(key)); Assertions.assertEquals(value, eventData.getProperties().get(key)); }); for (String property : systemPropertyNames) { Assertions.assertFalse(eventData.getSystemProperties().containsKey(property), property + " should not be in system properties map."); } } /** * Verify we can deserialize a message to {@link PartitionProperties}. 
*/ @Test public void deserializePartitionProperties() { final String eventHubName = "my-event-hub"; final String id = "partition-id-test"; final long beginningSequenceNumber = 1343L; final long lastEnqueuedSequenceNumber = 1500L; final String lastEnqueuedOffset = "102"; final Date lastEnqueuedTimeAsDate = new Date(1569275540L); final Instant lastEnqueuedTime = lastEnqueuedTimeAsDate.toInstant(); final boolean isEmpty = true; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY, id); values.put(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER, beginningSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER, lastEnqueuedSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET, lastEnqueuedOffset); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC, lastEnqueuedTimeAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY, isEmpty); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final PartitionProperties partitionProperties = serializer.deserialize(message, PartitionProperties.class); Assertions.assertNotNull(partitionProperties); Assertions.assertEquals(eventHubName, partitionProperties.getEventHubName()); Assertions.assertEquals(id, partitionProperties.getId()); Assertions.assertEquals(beginningSequenceNumber, partitionProperties.getBeginningSequenceNumber()); Assertions.assertEquals(lastEnqueuedSequenceNumber, partitionProperties.getLastEnqueuedSequenceNumber()); Assertions.assertEquals(lastEnqueuedOffset, partitionProperties.getLastEnqueuedOffset()); Assertions.assertEquals(lastEnqueuedTime, partitionProperties.getLastEnqueuedTime()); Assertions.assertEquals(isEmpty, partitionProperties.isEmpty()); } /** * Verify we can deserialize a message to {@link 
EventHubProperties}. */ @Test public void deserializeEventHubProperties() { final String eventHubName = "my-event-hub"; final Date createdAtAsDate = new Date(1569275540L); final Instant createdAt = createdAtAsDate.toInstant(); final String[] partitionIds = new String[]{ "1", "foo", "bar", "baz" }; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, partitionIds); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final EventHubProperties properties = serializer.deserialize(message, EventHubProperties.class); Assertions.assertNotNull(properties); Assertions.assertEquals(eventHubName, properties.getName()); Assertions.assertEquals(createdAt, properties.getCreatedAt()); Assertions.assertArrayEquals(partitionIds, properties.getPartitionIds()); } /** * Verify that it throws if the value is not what we expect. In this case, eventHubName is not a string. */ @Test /** * Verify that it throws if the value in the map is null. */ @Test public void throwsWhenNullValueInResponse() { final String eventHubName = "event-hub-name-test"; final Date createdAtAsDate = new Date(1569275540L); final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, null); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); assertThrows(AzureException.class, () -> { serializer.deserialize(message, EventHubProperties.class); }); } }
Thanks for the review. The changes have been incorporated.
/**
 * Verify that deserialization fails with an {@link AzureException} when a management-channel
 * response map contains a {@code null} value (here, the partition-ids entry).
 */
@Test
public void throwsWhenNullValueInResponse() {
    // Arrange: partition-ids entry is intentionally null.
    final String eventHubName = "event-hub-name-test";
    final Date createdAtAsDate = new Date(1569275540L);

    final Map<String, Object> values = new HashMap<>();
    values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName);
    values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate);
    values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, null);

    final AmqpValue amqpValue = new AmqpValue(values);
    final Message message = Proton.message();
    message.setBody(amqpValue);

    // Act & Assert: only the call under test is inside the lambda. Keeping the arrange
    // section outside ensures a setup failure cannot be mistaken for the expected exception.
    assertThrows(AzureException.class, () -> serializer.deserialize(message, EventHubProperties.class));
}
serializer.deserialize(message, EventHubProperties.class);
/**
 * Verify that an {@link AzureException} is raised when the response map holds a {@code null}
 * value for one of its entries (the partition-ids key in this case).
 */
@Test
public void throwsWhenNullValueInResponse() {
    final String hubName = "event-hub-name-test";
    final Date creationDate = new Date(1569275540L);

    final Map<String, Object> body = new HashMap<>();
    body.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, hubName);
    body.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, creationDate);
    // The null entry is what should trigger the failure.
    body.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, null);

    final Message message = Proton.message();
    message.setBody(new AmqpValue(body));

    assertThrows(AzureException.class, () -> serializer.deserialize(message, EventHubProperties.class));
}
class EventHubMessageSerializerTest { private final EventHubMessageSerializer serializer = new EventHubMessageSerializer(); @Test public void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, EventData.class)); } @Test public void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test public void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type EventData. */ @Test public void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test public void cannotDeserializeObject() { final Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, EventHubAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link EventData}. 
*/ @Test public void deserializeEventData() { final String[] systemPropertyNames = new String[]{ PARTITION_KEY_ANNOTATION_NAME.getValue(), OFFSET_ANNOTATION_NAME.getValue(), ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), }; final Message message = getMessage("hello-world".getBytes(UTF_8)); final EventData eventData = serializer.deserialize(message, EventData.class); Assertions.assertEquals(ENQUEUED_TIME, eventData.getEnqueuedTime()); Assertions.assertEquals(OFFSET, eventData.getOffset()); Assertions.assertEquals(PARTITION_KEY, eventData.getPartitionKey()); Assertions.assertEquals(SEQUENCE_NUMBER, eventData.getSequenceNumber()); Assertions.assertTrue(eventData.getSystemProperties().containsKey(OTHER_SYSTEM_PROPERTY)); final Object otherPropertyValue = eventData.getSystemProperties().get(OTHER_SYSTEM_PROPERTY); Assertions.assertTrue(otherPropertyValue instanceof Boolean); Assertions.assertTrue((Boolean) otherPropertyValue); Assertions.assertEquals(APPLICATION_PROPERTIES.size(), eventData.getProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(eventData.getProperties().containsKey(key)); Assertions.assertEquals(value, eventData.getProperties().get(key)); }); for (String property : systemPropertyNames) { Assertions.assertFalse(eventData.getSystemProperties().containsKey(property), property + " should not be in system properties map."); } } /** * Verify we can deserialize a message to {@link PartitionProperties}. 
*/ @Test public void deserializePartitionProperties() { final String eventHubName = "my-event-hub"; final String id = "partition-id-test"; final long beginningSequenceNumber = 1343L; final long lastEnqueuedSequenceNumber = 1500L; final String lastEnqueuedOffset = "102"; final Date lastEnqueuedTimeAsDate = new Date(1569275540L); final Instant lastEnqueuedTime = lastEnqueuedTimeAsDate.toInstant(); final boolean isEmpty = true; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY, id); values.put(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER, beginningSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER, lastEnqueuedSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET, lastEnqueuedOffset); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC, lastEnqueuedTimeAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY, isEmpty); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final PartitionProperties partitionProperties = serializer.deserialize(message, PartitionProperties.class); Assertions.assertNotNull(partitionProperties); Assertions.assertEquals(eventHubName, partitionProperties.getEventHubName()); Assertions.assertEquals(id, partitionProperties.getId()); Assertions.assertEquals(beginningSequenceNumber, partitionProperties.getBeginningSequenceNumber()); Assertions.assertEquals(lastEnqueuedSequenceNumber, partitionProperties.getLastEnqueuedSequenceNumber()); Assertions.assertEquals(lastEnqueuedOffset, partitionProperties.getLastEnqueuedOffset()); Assertions.assertEquals(lastEnqueuedTime, partitionProperties.getLastEnqueuedTime()); Assertions.assertEquals(isEmpty, partitionProperties.isEmpty()); } /** * Verify we can deserialize a message to {@link 
EventHubProperties}. */ @Test public void deserializeEventHubProperties() { final String eventHubName = "my-event-hub"; final Date createdAtAsDate = new Date(1569275540L); final Instant createdAt = createdAtAsDate.toInstant(); final String[] partitionIds = new String[]{ "1", "foo", "bar", "baz" }; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, partitionIds); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final EventHubProperties properties = serializer.deserialize(message, EventHubProperties.class); Assertions.assertNotNull(properties); Assertions.assertEquals(eventHubName, properties.getName()); Assertions.assertEquals(createdAt, properties.getCreatedAt()); Assertions.assertArrayEquals(partitionIds, properties.getPartitionIds()); } /** * Verify that it throws if the value is not what we expect. In this case, eventHubName is not a string. */ @Test public void throwsWhenIncorrectTypeInResponse() { assertThrows(AzureException.class, () -> { final Long eventHubName = 100L; final Date createdAtAsDate = new Date(1569275540L); final String[] partitionIds = new String[]{ "1", "foo", "bar", "baz" }; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, partitionIds); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); serializer.deserialize(message, EventHubProperties.class); }); } /** * Verify that it throws if the value in the map is null. */ @Test }
class EventHubMessageSerializerTest { private final EventHubMessageSerializer serializer = new EventHubMessageSerializer(); @Test public void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, EventData.class)); } @Test public void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test public void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type EventData. */ @Test public void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test public void cannotDeserializeObject() { final Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, EventHubAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link EventData}. 
*/ @Test public void deserializeEventData() { final String[] systemPropertyNames = new String[]{ PARTITION_KEY_ANNOTATION_NAME.getValue(), OFFSET_ANNOTATION_NAME.getValue(), ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), }; final Message message = getMessage("hello-world".getBytes(UTF_8)); final EventData eventData = serializer.deserialize(message, EventData.class); Assertions.assertEquals(ENQUEUED_TIME, eventData.getEnqueuedTime()); Assertions.assertEquals(OFFSET, eventData.getOffset()); Assertions.assertEquals(PARTITION_KEY, eventData.getPartitionKey()); Assertions.assertEquals(SEQUENCE_NUMBER, eventData.getSequenceNumber()); Assertions.assertTrue(eventData.getSystemProperties().containsKey(OTHER_SYSTEM_PROPERTY)); final Object otherPropertyValue = eventData.getSystemProperties().get(OTHER_SYSTEM_PROPERTY); Assertions.assertTrue(otherPropertyValue instanceof Boolean); Assertions.assertTrue((Boolean) otherPropertyValue); Assertions.assertEquals(APPLICATION_PROPERTIES.size(), eventData.getProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(eventData.getProperties().containsKey(key)); Assertions.assertEquals(value, eventData.getProperties().get(key)); }); for (String property : systemPropertyNames) { Assertions.assertFalse(eventData.getSystemProperties().containsKey(property), property + " should not be in system properties map."); } } /** * Verify we can deserialize a message to {@link PartitionProperties}. 
*/ @Test public void deserializePartitionProperties() { final String eventHubName = "my-event-hub"; final String id = "partition-id-test"; final long beginningSequenceNumber = 1343L; final long lastEnqueuedSequenceNumber = 1500L; final String lastEnqueuedOffset = "102"; final Date lastEnqueuedTimeAsDate = new Date(1569275540L); final Instant lastEnqueuedTime = lastEnqueuedTimeAsDate.toInstant(); final boolean isEmpty = true; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY, id); values.put(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER, beginningSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER, lastEnqueuedSequenceNumber); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET, lastEnqueuedOffset); values.put(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC, lastEnqueuedTimeAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY, isEmpty); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final PartitionProperties partitionProperties = serializer.deserialize(message, PartitionProperties.class); Assertions.assertNotNull(partitionProperties); Assertions.assertEquals(eventHubName, partitionProperties.getEventHubName()); Assertions.assertEquals(id, partitionProperties.getId()); Assertions.assertEquals(beginningSequenceNumber, partitionProperties.getBeginningSequenceNumber()); Assertions.assertEquals(lastEnqueuedSequenceNumber, partitionProperties.getLastEnqueuedSequenceNumber()); Assertions.assertEquals(lastEnqueuedOffset, partitionProperties.getLastEnqueuedOffset()); Assertions.assertEquals(lastEnqueuedTime, partitionProperties.getLastEnqueuedTime()); Assertions.assertEquals(isEmpty, partitionProperties.isEmpty()); } /** * Verify we can deserialize a message to {@link 
EventHubProperties}. */ @Test public void deserializeEventHubProperties() { final String eventHubName = "my-event-hub"; final Date createdAtAsDate = new Date(1569275540L); final Instant createdAt = createdAtAsDate.toInstant(); final String[] partitionIds = new String[]{ "1", "foo", "bar", "baz" }; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, partitionIds); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); final EventHubProperties properties = serializer.deserialize(message, EventHubProperties.class); Assertions.assertNotNull(properties); Assertions.assertEquals(eventHubName, properties.getName()); Assertions.assertEquals(createdAt, properties.getCreatedAt()); Assertions.assertArrayEquals(partitionIds, properties.getPartitionIds()); } /** * Verify that it throws if the value is not what we expect. In this case, eventHubName is not a string. */ @Test public void throwsWhenIncorrectTypeInResponse() { final Long eventHubName = 100L; final Date createdAtAsDate = new Date(1569275540L); final String[] partitionIds = new String[]{"1", "foo", "bar", "baz"}; final Map<String, Object> values = new HashMap<>(); values.put(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY, eventHubName); values.put(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT, createdAtAsDate); values.put(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS, partitionIds); final AmqpValue amqpValue = new AmqpValue(values); final Message message = Proton.message(); message.setBody(amqpValue); assertThrows(AzureException.class, () -> { serializer.deserialize(message, EventHubProperties.class); }); } /** * Verify that it throws if the value in the map is null. */ @Test }
Nit: remove the extra empty line.
/**
 * Receives a batch of {@link PartitionEvent events} from the Event Hub.
 *
 * @param maximumMessageCount The maximum number of events to receive in this batch.
 * @param maximumWaitTime The maximum amount of time to wait for events to arrive.
 * @return A set of {@link PartitionEvent} containing up to {@code maximumMessageCount} events.
 * @throws NullPointerException if {@code maximumWaitTime} is null.
 * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1, or if
 *     {@code maximumWaitTime} is zero or negative.
 */
public IterableStream<PartitionEvent> receive(int maximumMessageCount, Duration maximumWaitTime) {
    Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null.");

    if (maximumMessageCount < 1) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("'maximumMessageCount' cannot be less than 1."));
    } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("'maximumWaitTime' cannot be zero or less."));
    }

    // Emit events lazily. Do not collect and block here (collectList().block()) just to log a
    // count: that materializes the whole batch up-front and prevents the caller from consuming
    // events as they arrive.
    final Flux<PartitionEvent> events = Flux.create(emitter ->
        queueWork(RECEIVE_ALL_KEY, maximumMessageCount, maximumWaitTime, emitter));

    return new IterableStream<>(events);
}
} else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) {
/**
 * Receives a batch of {@link PartitionEvent events} from the Event Hub.
 *
 * @param maximumMessageCount The maximum number of events to receive in this batch.
 * @param maximumWaitTime The maximum amount of time to wait for events to arrive.
 * @return A set of {@link PartitionEvent} containing up to {@code maximumMessageCount} events.
 * @throws NullPointerException if {@code maximumWaitTime} is null.
 * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1, or if
 *     {@code maximumWaitTime} is zero or negative.
 */
public IterableStream<PartitionEvent> receive(int maximumMessageCount, Duration maximumWaitTime) {
    Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null.");

    // Guard clauses: both arguments must be strictly positive.
    if (maximumMessageCount < 1) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("'maximumMessageCount' cannot be less than 1."));
    }
    if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("'maximumWaitTime' cannot be zero or less."));
    }

    // Wrap a lazily-emitting Flux; events are produced only as the caller iterates.
    final Flux<PartitionEvent> eventFlux = Flux.create(emitter ->
        queueWork(RECEIVE_ALL_KEY, maximumMessageCount, maximumWaitTime, emitter));

    return new IterableStream<>(eventFlux);
}
/**
 * A synchronous consumer responsible for reading {@link PartitionEvent events} from an Event Hub,
 * delegating to an {@link EventHubConsumerAsyncClient} and blocking up to the configured try timeout.
 */
class EventHubConsumerClient implements Closeable {
    // Key used in openSubscribers for the subscriber that reads from every partition.
    private static final String RECEIVE_ALL_KEY = "receive-all";

    private final ClientLogger logger = new ClientLogger(EventHubConsumerClient.class);
    // One synchronous subscriber per partition id (or RECEIVE_ALL_KEY), created lazily.
    private final ConcurrentHashMap<String, SynchronousEventSubscriber> openSubscribers =
        new ConcurrentHashMap<>();
    // Monotonically increasing id for each queued receive-work item.
    private final AtomicLong idGenerator = new AtomicLong();
    private final EventHubConsumerAsyncClient consumer;
    private final Duration timeout;

    EventHubConsumerClient(EventHubConsumerAsyncClient consumer, Duration tryTimeout) {
        Objects.requireNonNull(tryTimeout, "'tryTimeout' cannot be null.");
        this.consumer = Objects.requireNonNull(consumer, "'consumer' cannot be null.");
        this.timeout = tryTimeout;
    }

    /**
     * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely
     * similar to {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Event Hubs namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return consumer.getFullyQualifiedNamespace();
    }

    /**
     * Gets the Event Hub name this client interacts with.
     *
     * @return The Event Hub name this client interacts with.
     */
    public String getEventHubName() {
        return consumer.getEventHubName();
    }

    /**
     * Gets the position of the event in the partition where the consumer should begin reading.
     *
     * @return The position of the event in the partition where the consumer should begin reading.
     */
    public EventPosition getStartingPosition() {
        return consumer.getStartingPosition();
    }

    /**
     * Gets the consumer group this consumer is reading events as a part of.
     *
     * @return The consumer group this consumer is reading events as a part of.
     */
    public String getConsumerGroup() {
        return consumer.getConsumerGroup();
    }

    /**
     * Retrieves information about an Event Hub, including the number of partitions present and their
     * identifiers. Blocks up to the configured try timeout.
     *
     * @return The set of information for the Event Hub that this client is associated with.
     */
    public EventHubProperties getProperties() {
        return consumer.getProperties().block(timeout);
    }

    /**
     * Retrieves the identifiers for the partitions of an Event Hub.
     *
     * @return An {@link IterableStream} of identifiers for the partitions of an Event Hub.
     */
    public IterableStream<String> getPartitionIds() {
        return new IterableStream<>(consumer.getPartitionIds());
    }

    /**
     * Retrieves information about a specific partition for an Event Hub, including elements that describe the
     * available events in the partition event stream. Blocks up to the configured try timeout.
     *
     * @param partitionId The unique identifier of a partition associated with the Event Hub.
     *
     * @return The set of information for the requested partition under the Event Hub this client is
     *     associated with.
     */
    public PartitionProperties getPartitionProperties(String partitionId) {
        return consumer.getPartitionProperties(partitionId).block(timeout);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from the Event Hub partition, waiting up to the
     * default try timeout configured when this client was created.
     *
     * @param partitionId Identifier of the partition to read events from.
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1.
     */
    public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount) {
        return receive(partitionId, maximumMessageCount, timeout);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from the Event Hub partition. The batch is returned
     * after {@code maximumMessageCount} events are received or after {@code maximumWaitTime} has elapsed.
     *
     * @param partitionId Identifier of the partition to read events from.
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for
     *     the batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws NullPointerException if {@code maximumWaitTime} is null.
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or
     *     {@code maximumWaitTime} is zero or a negative duration.
     */
    public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount,
            Duration maximumWaitTime) {
        Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null.");

        if (maximumMessageCount < 1) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumMessageCount' cannot be less than 1."));
        } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumWaitTime' cannot be zero or less."));
        }

        // Queue the receive work lazily; the returned IterableStream drives demand when iterated.
        // The previous collectList().map(...).block() was a debugging leftover that blocked the
        // calling thread and buffered the entire batch up front.
        final Flux<PartitionEvent> events = Flux.create(emitter ->
            queueWork(partitionId, maximumMessageCount, maximumWaitTime, emitter));

        return new IterableStream<>(events);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from all partitions, waiting up to the default try
     * timeout configured when this client was created.
     *
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1.
     */
    public IterableStream<PartitionEvent> receive(int maximumMessageCount) {
        return receive(maximumMessageCount, timeout);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from all partitions. The batch is returned after
     * {@code maximumMessageCount} events are received or after {@code maximumWaitTime} has elapsed.
     *
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for
     *     the batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws NullPointerException if {@code maximumWaitTime} is null.
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or
     *     {@code maximumWaitTime} is zero or a negative duration.
     */
    public IterableStream<PartitionEvent> receive(int maximumMessageCount, Duration maximumWaitTime) {
        // NOTE(review): this overload was documented and called by receive(int) but its body was
        // missing; restored here so the class compiles and the contract holds.
        Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null.");

        if (maximumMessageCount < 1) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumMessageCount' cannot be less than 1."));
        } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumWaitTime' cannot be zero or less."));
        }

        final Flux<PartitionEvent> events = Flux.create(emitter ->
            queueWork(RECEIVE_ALL_KEY, maximumMessageCount, maximumWaitTime, emitter));

        return new IterableStream<>(events);
    }

    /**
     * Given an {@code emitter}, queues that work in {@link SynchronousEventSubscriber}. If the synchronous
     * job has not been created, will initialise it.
     */
    private void queueWork(String partitionId, int maximumMessageCount, Duration maximumWaitTime,
            FluxSink<PartitionEvent> emitter) {
        final long id = idGenerator.getAndIncrement();
        final SynchronousReceiveWork work =
            new SynchronousReceiveWork(id, maximumMessageCount, maximumWaitTime, emitter);
        // Lazily start one long-lived subscriber per key; RECEIVE_ALL_KEY reads every partition.
        final SynchronousEventSubscriber subscriber = openSubscribers.computeIfAbsent(partitionId, key -> {
            SynchronousEventSubscriber syncSubscriber = new SynchronousEventSubscriber();
            if (RECEIVE_ALL_KEY.equals(key)) {
                logger.info("Started synchronous event subscriber for all partitions");
                consumer.receive().subscribeWith(syncSubscriber);
            } else {
                logger.info("Started synchronous event subscriber for partition '{}'.", key);
                consumer.receive(key).subscribeWith(syncSubscriber);
            }
            return syncSubscriber;
        });

        logger.info("Queueing work item in SynchronousEventSubscriber.");
        subscriber.queueReceiveWork(work);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void close() {
        consumer.close();
    }
}
/**
 * A synchronous consumer responsible for reading {@link PartitionEvent events} from an Event Hub,
 * delegating to an {@link EventHubConsumerAsyncClient} and blocking up to the configured try timeout.
 */
class EventHubConsumerClient implements Closeable {
    // Key used in openSubscribers for the subscriber that reads from every partition.
    private static final String RECEIVE_ALL_KEY = "receive-all";

    private final ClientLogger logger = new ClientLogger(EventHubConsumerClient.class);
    // One synchronous subscriber per partition id (or RECEIVE_ALL_KEY), created lazily.
    private final ConcurrentHashMap<String, SynchronousEventSubscriber> openSubscribers =
        new ConcurrentHashMap<>();
    // Monotonically increasing id for each queued receive-work item.
    private final AtomicLong idGenerator = new AtomicLong();
    private final EventHubConsumerAsyncClient consumer;
    private final Duration timeout;

    EventHubConsumerClient(EventHubConsumerAsyncClient consumer, Duration tryTimeout) {
        Objects.requireNonNull(tryTimeout, "'tryTimeout' cannot be null.");
        this.consumer = Objects.requireNonNull(consumer, "'consumer' cannot be null.");
        this.timeout = tryTimeout;
    }

    /**
     * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely
     * similar to {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Event Hubs namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return consumer.getFullyQualifiedNamespace();
    }

    /**
     * Gets the Event Hub name this client interacts with.
     *
     * @return The Event Hub name this client interacts with.
     */
    public String getEventHubName() {
        return consumer.getEventHubName();
    }

    /**
     * Gets the position of the event in the partition where the consumer should begin reading.
     *
     * @return The position of the event in the partition where the consumer should begin reading.
     */
    public EventPosition getStartingPosition() {
        return consumer.getStartingPosition();
    }

    /**
     * Gets the consumer group this consumer is reading events as a part of.
     *
     * @return The consumer group this consumer is reading events as a part of.
     */
    public String getConsumerGroup() {
        return consumer.getConsumerGroup();
    }

    /**
     * Retrieves information about an Event Hub, including the number of partitions present and their
     * identifiers. Blocks up to the configured try timeout.
     *
     * @return The set of information for the Event Hub that this client is associated with.
     */
    public EventHubProperties getProperties() {
        return consumer.getProperties().block(timeout);
    }

    /**
     * Retrieves the identifiers for the partitions of an Event Hub.
     *
     * @return An {@link IterableStream} of identifiers for the partitions of an Event Hub.
     */
    public IterableStream<String> getPartitionIds() {
        return new IterableStream<>(consumer.getPartitionIds());
    }

    /**
     * Retrieves information about a specific partition for an Event Hub, including elements that describe the
     * available events in the partition event stream. Blocks up to the configured try timeout.
     *
     * @param partitionId The unique identifier of a partition associated with the Event Hub.
     *
     * @return The set of information for the requested partition under the Event Hub this client is
     *     associated with.
     */
    public PartitionProperties getPartitionProperties(String partitionId) {
        return consumer.getPartitionProperties(partitionId).block(timeout);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from the Event Hub partition, waiting up to the
     * default try timeout configured when this client was created.
     *
     * @param partitionId Identifier of the partition to read events from.
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1.
     */
    public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount) {
        return receive(partitionId, maximumMessageCount, timeout);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from the Event Hub partition. The batch is returned
     * after {@code maximumMessageCount} events are received or after {@code maximumWaitTime} has elapsed.
     *
     * @param partitionId Identifier of the partition to read events from.
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for
     *     the batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws NullPointerException if {@code maximumWaitTime} is null.
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or
     *     {@code maximumWaitTime} is zero or a negative duration.
     */
    public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount,
            Duration maximumWaitTime) {
        Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null.");

        if (maximumMessageCount < 1) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumMessageCount' cannot be less than 1."));
        } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumWaitTime' cannot be zero or less."));
        }

        // Queue the receive work lazily; the returned IterableStream drives demand when iterated.
        final Flux<PartitionEvent> events = Flux.create(emitter ->
            queueWork(partitionId, maximumMessageCount, maximumWaitTime, emitter));

        return new IterableStream<>(events);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from all partitions, waiting up to the default try
     * timeout configured when this client was created.
     *
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1.
     */
    public IterableStream<PartitionEvent> receive(int maximumMessageCount) {
        return receive(maximumMessageCount, timeout);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from all partitions. The batch is returned after
     * {@code maximumMessageCount} events are received or after {@code maximumWaitTime} has elapsed.
     *
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for
     *     the batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws NullPointerException if {@code maximumWaitTime} is null.
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or
     *     {@code maximumWaitTime} is zero or a negative duration.
     */
    public IterableStream<PartitionEvent> receive(int maximumMessageCount, Duration maximumWaitTime) {
        // NOTE(review): this overload was documented and called by receive(int) but its body was
        // missing; restored here so the class compiles and the contract holds.
        Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null.");

        if (maximumMessageCount < 1) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumMessageCount' cannot be less than 1."));
        } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumWaitTime' cannot be zero or less."));
        }

        final Flux<PartitionEvent> events = Flux.create(emitter ->
            queueWork(RECEIVE_ALL_KEY, maximumMessageCount, maximumWaitTime, emitter));

        return new IterableStream<>(events);
    }

    /**
     * Given an {@code emitter}, queues that work in {@link SynchronousEventSubscriber}. If the synchronous
     * job has not been created, will initialise it.
     */
    private void queueWork(String partitionId, int maximumMessageCount, Duration maximumWaitTime,
            FluxSink<PartitionEvent> emitter) {
        final long id = idGenerator.getAndIncrement();
        final SynchronousReceiveWork work =
            new SynchronousReceiveWork(id, maximumMessageCount, maximumWaitTime, emitter);
        // Lazily start one long-lived subscriber per key; RECEIVE_ALL_KEY reads every partition.
        final SynchronousEventSubscriber subscriber = openSubscribers.computeIfAbsent(partitionId, key -> {
            SynchronousEventSubscriber syncSubscriber = new SynchronousEventSubscriber();
            if (RECEIVE_ALL_KEY.equals(key)) {
                logger.info("Started synchronous event subscriber for all partitions");
                consumer.receive().subscribeWith(syncSubscriber);
            } else {
                logger.info("Started synchronous event subscriber for partition '{}'.", key);
                consumer.receive(key).subscribeWith(syncSubscriber);
            }
            return syncSubscriber;
        });

        logger.info("Queueing work item in SynchronousEventSubscriber.");
        subscriber.queueReceiveWork(work);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void close() {
        consumer.close();
    }
}
Any reason why we can't just `return new IterableStream<>(events.block())` here?
/**
 * Receives a batch of {@link PartitionEvent events} from all partitions. The batch is returned after
 * {@code maximumMessageCount} events are received or after {@code maximumWaitTime} has elapsed.
 *
 * @param maximumMessageCount The maximum number of messages to receive in this batch.
 * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for the
 *     batch.
 *
 * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
 *     {@code maximumMessageCount} events.
 *
 * @throws NullPointerException if {@code maximumWaitTime} is null.
 * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or {@code maximumWaitTime} is
 *     zero or a negative duration.
 */
public IterableStream<PartitionEvent> receive(int maximumMessageCount, Duration maximumWaitTime) {
    Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null.");

    if (maximumMessageCount < 1) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("'maximumMessageCount' cannot be less than 1."));
    } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("'maximumWaitTime' cannot be zero or less."));
    }

    // Queue the receive work lazily; the returned IterableStream drives demand when iterated.
    // The previous collectList().map(...).block() was a debugging leftover: it blocked the calling
    // thread until the whole batch completed and buffered every event up front for no benefit.
    final Flux<PartitionEvent> events = Flux.create(emitter ->
        queueWork(RECEIVE_ALL_KEY, maximumMessageCount, maximumWaitTime, emitter));

    return new IterableStream<>(events);
}
return new IterableStream<>(map);
/**
 * Receives a batch of {@link PartitionEvent events} from all partitions, returning once
 * {@code maximumMessageCount} events have arrived or {@code maximumWaitTime} has elapsed.
 *
 * @param maximumMessageCount The maximum number of messages to receive in this batch.
 * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count.
 *
 * @return A set of {@link PartitionEvent} containing up to {@code maximumMessageCount} events.
 *
 * @throws NullPointerException if {@code maximumWaitTime} is null.
 * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or
 *     {@code maximumWaitTime} is zero or negative.
 */
public IterableStream<PartitionEvent> receive(int maximumMessageCount, Duration maximumWaitTime) {
    Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null.");

    // Guard clauses: reject an empty batch request and a non-positive wait window.
    if (maximumMessageCount < 1) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("'maximumMessageCount' cannot be less than 1."));
    }
    if (maximumWaitTime.isZero() || maximumWaitTime.isNegative()) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("'maximumWaitTime' cannot be zero or less."));
    }

    // Lazily enqueue the work against the shared all-partitions subscriber; demand flows when
    // the caller iterates the returned stream.
    final Flux<PartitionEvent> batch = Flux.create(emitter ->
        queueWork(RECEIVE_ALL_KEY, maximumMessageCount, maximumWaitTime, emitter));
    return new IterableStream<>(batch);
}
/**
 * A synchronous consumer responsible for reading {@link PartitionEvent events} from an Event Hub,
 * delegating to an {@link EventHubConsumerAsyncClient} and blocking up to the configured try timeout.
 */
class EventHubConsumerClient implements Closeable {
    // Key used in openSubscribers for the subscriber that reads from every partition.
    private static final String RECEIVE_ALL_KEY = "receive-all";

    private final ClientLogger logger = new ClientLogger(EventHubConsumerClient.class);
    // One synchronous subscriber per partition id (or RECEIVE_ALL_KEY), created lazily.
    private final ConcurrentHashMap<String, SynchronousEventSubscriber> openSubscribers =
        new ConcurrentHashMap<>();
    // Monotonically increasing id for each queued receive-work item.
    private final AtomicLong idGenerator = new AtomicLong();
    private final EventHubConsumerAsyncClient consumer;
    private final Duration timeout;

    EventHubConsumerClient(EventHubConsumerAsyncClient consumer, Duration tryTimeout) {
        Objects.requireNonNull(tryTimeout, "'tryTimeout' cannot be null.");
        this.consumer = Objects.requireNonNull(consumer, "'consumer' cannot be null.");
        this.timeout = tryTimeout;
    }

    /**
     * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely
     * similar to {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Event Hubs namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return consumer.getFullyQualifiedNamespace();
    }

    /**
     * Gets the Event Hub name this client interacts with.
     *
     * @return The Event Hub name this client interacts with.
     */
    public String getEventHubName() {
        return consumer.getEventHubName();
    }

    /**
     * Gets the position of the event in the partition where the consumer should begin reading.
     *
     * @return The position of the event in the partition where the consumer should begin reading.
     */
    public EventPosition getStartingPosition() {
        return consumer.getStartingPosition();
    }

    /**
     * Gets the consumer group this consumer is reading events as a part of.
     *
     * @return The consumer group this consumer is reading events as a part of.
     */
    public String getConsumerGroup() {
        return consumer.getConsumerGroup();
    }

    /**
     * Retrieves information about an Event Hub, including the number of partitions present and their
     * identifiers. Blocks up to the configured try timeout.
     *
     * @return The set of information for the Event Hub that this client is associated with.
     */
    public EventHubProperties getProperties() {
        return consumer.getProperties().block(timeout);
    }

    /**
     * Retrieves the identifiers for the partitions of an Event Hub.
     *
     * @return An {@link IterableStream} of identifiers for the partitions of an Event Hub.
     */
    public IterableStream<String> getPartitionIds() {
        return new IterableStream<>(consumer.getPartitionIds());
    }

    /**
     * Retrieves information about a specific partition for an Event Hub, including elements that describe the
     * available events in the partition event stream. Blocks up to the configured try timeout.
     *
     * @param partitionId The unique identifier of a partition associated with the Event Hub.
     *
     * @return The set of information for the requested partition under the Event Hub this client is
     *     associated with.
     */
    public PartitionProperties getPartitionProperties(String partitionId) {
        return consumer.getPartitionProperties(partitionId).block(timeout);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from the Event Hub partition, waiting up to the
     * default try timeout configured when this client was created.
     *
     * @param partitionId Identifier of the partition to read events from.
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1.
     */
    public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount) {
        return receive(partitionId, maximumMessageCount, timeout);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from the Event Hub partition. The batch is returned
     * after {@code maximumMessageCount} events are received or after {@code maximumWaitTime} has elapsed.
     *
     * @param partitionId Identifier of the partition to read events from.
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for
     *     the batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws NullPointerException if {@code maximumWaitTime} is null.
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or
     *     {@code maximumWaitTime} is zero or a negative duration.
     */
    public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount,
            Duration maximumWaitTime) {
        Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null.");

        if (maximumMessageCount < 1) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumMessageCount' cannot be less than 1."));
        } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumWaitTime' cannot be zero or less."));
        }

        // Queue the receive work lazily; the returned IterableStream drives demand when iterated.
        // The previous collectList().map(...).block() was a debugging leftover that blocked the
        // calling thread and buffered the entire batch up front.
        final Flux<PartitionEvent> events = Flux.create(emitter ->
            queueWork(partitionId, maximumMessageCount, maximumWaitTime, emitter));

        return new IterableStream<>(events);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from all partitions, waiting up to the default try
     * timeout configured when this client was created.
     *
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1.
     */
    public IterableStream<PartitionEvent> receive(int maximumMessageCount) {
        return receive(maximumMessageCount, timeout);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from all partitions. The batch is returned after
     * {@code maximumMessageCount} events are received or after {@code maximumWaitTime} has elapsed.
     *
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for
     *     the batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws NullPointerException if {@code maximumWaitTime} is null.
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or
     *     {@code maximumWaitTime} is zero or a negative duration.
     */
    public IterableStream<PartitionEvent> receive(int maximumMessageCount, Duration maximumWaitTime) {
        // NOTE(review): this overload was documented and called by receive(int) but its body was
        // missing; restored here so the class compiles and the contract holds.
        Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null.");

        if (maximumMessageCount < 1) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumMessageCount' cannot be less than 1."));
        } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumWaitTime' cannot be zero or less."));
        }

        final Flux<PartitionEvent> events = Flux.create(emitter ->
            queueWork(RECEIVE_ALL_KEY, maximumMessageCount, maximumWaitTime, emitter));

        return new IterableStream<>(events);
    }

    /**
     * Given an {@code emitter}, queues that work in {@link SynchronousEventSubscriber}. If the synchronous
     * job has not been created, will initialise it.
     */
    private void queueWork(String partitionId, int maximumMessageCount, Duration maximumWaitTime,
            FluxSink<PartitionEvent> emitter) {
        final long id = idGenerator.getAndIncrement();
        final SynchronousReceiveWork work =
            new SynchronousReceiveWork(id, maximumMessageCount, maximumWaitTime, emitter);
        // Lazily start one long-lived subscriber per key; RECEIVE_ALL_KEY reads every partition.
        final SynchronousEventSubscriber subscriber = openSubscribers.computeIfAbsent(partitionId, key -> {
            SynchronousEventSubscriber syncSubscriber = new SynchronousEventSubscriber();
            if (RECEIVE_ALL_KEY.equals(key)) {
                logger.info("Started synchronous event subscriber for all partitions");
                consumer.receive().subscribeWith(syncSubscriber);
            } else {
                logger.info("Started synchronous event subscriber for partition '{}'.", key);
                consumer.receive(key).subscribeWith(syncSubscriber);
            }
            return syncSubscriber;
        });

        logger.info("Queueing work item in SynchronousEventSubscriber.");
        subscriber.queueReceiveWork(work);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void close() {
        consumer.close();
    }
}
/**
 * A synchronous consumer responsible for reading {@link PartitionEvent events} from an Event Hub,
 * delegating to an {@link EventHubConsumerAsyncClient} and blocking up to the configured try timeout.
 */
class EventHubConsumerClient implements Closeable {
    // Key used in openSubscribers for the subscriber that reads from every partition.
    private static final String RECEIVE_ALL_KEY = "receive-all";

    private final ClientLogger logger = new ClientLogger(EventHubConsumerClient.class);
    // One synchronous subscriber per partition id (or RECEIVE_ALL_KEY), created lazily.
    private final ConcurrentHashMap<String, SynchronousEventSubscriber> openSubscribers =
        new ConcurrentHashMap<>();
    // Monotonically increasing id for each queued receive-work item.
    private final AtomicLong idGenerator = new AtomicLong();
    private final EventHubConsumerAsyncClient consumer;
    private final Duration timeout;

    EventHubConsumerClient(EventHubConsumerAsyncClient consumer, Duration tryTimeout) {
        Objects.requireNonNull(tryTimeout, "'tryTimeout' cannot be null.");
        this.consumer = Objects.requireNonNull(consumer, "'consumer' cannot be null.");
        this.timeout = tryTimeout;
    }

    /**
     * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely
     * similar to {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Event Hubs namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return consumer.getFullyQualifiedNamespace();
    }

    /**
     * Gets the Event Hub name this client interacts with.
     *
     * @return The Event Hub name this client interacts with.
     */
    public String getEventHubName() {
        return consumer.getEventHubName();
    }

    /**
     * Gets the position of the event in the partition where the consumer should begin reading.
     *
     * @return The position of the event in the partition where the consumer should begin reading.
     */
    public EventPosition getStartingPosition() {
        return consumer.getStartingPosition();
    }

    /**
     * Gets the consumer group this consumer is reading events as a part of.
     *
     * @return The consumer group this consumer is reading events as a part of.
     */
    public String getConsumerGroup() {
        return consumer.getConsumerGroup();
    }

    /**
     * Retrieves information about an Event Hub, including the number of partitions present and their
     * identifiers. Blocks up to the configured try timeout.
     *
     * @return The set of information for the Event Hub that this client is associated with.
     */
    public EventHubProperties getProperties() {
        return consumer.getProperties().block(timeout);
    }

    /**
     * Retrieves the identifiers for the partitions of an Event Hub.
     *
     * @return An {@link IterableStream} of identifiers for the partitions of an Event Hub.
     */
    public IterableStream<String> getPartitionIds() {
        return new IterableStream<>(consumer.getPartitionIds());
    }

    /**
     * Retrieves information about a specific partition for an Event Hub, including elements that describe the
     * available events in the partition event stream. Blocks up to the configured try timeout.
     *
     * @param partitionId The unique identifier of a partition associated with the Event Hub.
     *
     * @return The set of information for the requested partition under the Event Hub this client is
     *     associated with.
     */
    public PartitionProperties getPartitionProperties(String partitionId) {
        return consumer.getPartitionProperties(partitionId).block(timeout);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from the Event Hub partition, waiting up to the
     * default try timeout configured when this client was created.
     *
     * @param partitionId Identifier of the partition to read events from.
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1.
     */
    public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount) {
        return receive(partitionId, maximumMessageCount, timeout);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from the Event Hub partition. The batch is returned
     * after {@code maximumMessageCount} events are received or after {@code maximumWaitTime} has elapsed.
     *
     * @param partitionId Identifier of the partition to read events from.
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for
     *     the batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws NullPointerException if {@code maximumWaitTime} is null.
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or
     *     {@code maximumWaitTime} is zero or a negative duration.
     */
    public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount,
            Duration maximumWaitTime) {
        Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null.");

        if (maximumMessageCount < 1) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumMessageCount' cannot be less than 1."));
        } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumWaitTime' cannot be zero or less."));
        }

        // Queue the receive work lazily; the returned IterableStream drives demand when iterated.
        final Flux<PartitionEvent> events = Flux.create(emitter ->
            queueWork(partitionId, maximumMessageCount, maximumWaitTime, emitter));

        return new IterableStream<>(events);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from all partitions, waiting up to the default try
     * timeout configured when this client was created.
     *
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1.
     */
    public IterableStream<PartitionEvent> receive(int maximumMessageCount) {
        return receive(maximumMessageCount, timeout);
    }

    /**
     * Receives a batch of {@link PartitionEvent events} from all partitions. The batch is returned after
     * {@code maximumMessageCount} events are received or after {@code maximumWaitTime} has elapsed.
     *
     * @param maximumMessageCount The maximum number of messages to receive in this batch.
     * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for
     *     the batch.
     *
     * @return A set of {@link PartitionEvent} that was received. The iterable contains up to
     *     {@code maximumMessageCount} events.
     *
     * @throws NullPointerException if {@code maximumWaitTime} is null.
     * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or
     *     {@code maximumWaitTime} is zero or a negative duration.
     */
    public IterableStream<PartitionEvent> receive(int maximumMessageCount, Duration maximumWaitTime) {
        // NOTE(review): this overload was documented and called by receive(int) but its body was
        // missing; restored here so the class compiles and the contract holds.
        Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null.");

        if (maximumMessageCount < 1) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumMessageCount' cannot be less than 1."));
        } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'maximumWaitTime' cannot be zero or less."));
        }

        final Flux<PartitionEvent> events = Flux.create(emitter ->
            queueWork(RECEIVE_ALL_KEY, maximumMessageCount, maximumWaitTime, emitter));

        return new IterableStream<>(events);
    }

    /**
     * Given an {@code emitter}, queues that work in {@link SynchronousEventSubscriber}. If the synchronous
     * job has not been created, will initialise it.
     */
    private void queueWork(String partitionId, int maximumMessageCount, Duration maximumWaitTime,
            FluxSink<PartitionEvent> emitter) {
        final long id = idGenerator.getAndIncrement();
        final SynchronousReceiveWork work =
            new SynchronousReceiveWork(id, maximumMessageCount, maximumWaitTime, emitter);
        // Lazily start one long-lived subscriber per key; RECEIVE_ALL_KEY reads every partition.
        final SynchronousEventSubscriber subscriber = openSubscribers.computeIfAbsent(partitionId, key -> {
            SynchronousEventSubscriber syncSubscriber = new SynchronousEventSubscriber();
            if (RECEIVE_ALL_KEY.equals(key)) {
                logger.info("Started synchronous event subscriber for all partitions");
                consumer.receive().subscribeWith(syncSubscriber);
            } else {
                logger.info("Started synchronous event subscriber for partition '{}'.", key);
                consumer.receive(key).subscribeWith(syncSubscriber);
            }
            return syncSubscriber;
        });

        logger.info("Queueing work item in SynchronousEventSubscriber.");
        subscriber.queueReceiveWork(work);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void close() {
        consumer.close();
    }
}
Good catch — I had added it earlier for debugging and forgot to remove it.
public IterableStream<PartitionEvent> receive(int maximumMessageCount, Duration maximumWaitTime) { Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null."); if (maximumMessageCount < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumMessageCount' cannot be less than 1.")); } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumWaitTime' cannot be zero or less.")); } final Flux<PartitionEvent> events = Flux.create(emitter -> { queueWork(RECEIVE_ALL_KEY, maximumMessageCount, maximumWaitTime, emitter); }); final Flux<PartitionEvent> map = events.collectList().map(x -> { logger.info("Number of events received: {}", x.size()); return Flux.fromIterable(x); }).block(); return new IterableStream<>(map); }
return new IterableStream<>(map);
public IterableStream<PartitionEvent> receive(int maximumMessageCount, Duration maximumWaitTime) { Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null."); if (maximumMessageCount < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumMessageCount' cannot be less than 1.")); } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumWaitTime' cannot be zero or less.")); } final Flux<PartitionEvent> events = Flux.create(emitter -> { queueWork(RECEIVE_ALL_KEY, maximumMessageCount, maximumWaitTime, emitter); }); return new IterableStream<>(events); }
class EventHubConsumerClient implements Closeable { private static final String RECEIVE_ALL_KEY = "receive-all"; private final ClientLogger logger = new ClientLogger(EventHubConsumerClient.class); private final ConcurrentHashMap<String, SynchronousEventSubscriber> openSubscribers = new ConcurrentHashMap<>(); private final AtomicLong idGenerator = new AtomicLong(); private final EventHubConsumerAsyncClient consumer; private final Duration timeout; EventHubConsumerClient(EventHubConsumerAsyncClient consumer, Duration tryTimeout) { Objects.requireNonNull(tryTimeout, "'tryTimeout' cannot be null."); this.consumer = Objects.requireNonNull(consumer, "'consumer' cannot be null."); this.timeout = tryTimeout; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with */ public String getFullyQualifiedNamespace() { return consumer.getFullyQualifiedNamespace(); } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return consumer.getEventHubName(); } /** * Gets the position of the event in the partition where the consumer should begin reading. * * @return The position of the event in the partition where the consumer should begin reading. */ public EventPosition getStartingPosition() { return consumer.getStartingPosition(); } /** * Gets the consumer group this consumer is reading events as a part of. * * @return The consumer group this consumer is reading events as a part of. */ public String getConsumerGroup() { return consumer.getConsumerGroup(); } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. 
*/ public EventHubProperties getProperties() { return consumer.getProperties().block(timeout); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public IterableStream<String> getPartitionIds() { return new IterableStream<>(consumer.getPartitionIds()); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. */ public PartitionProperties getPartitionProperties(String partitionId) { return consumer.getPartitionProperties(partitionId).block(timeout); } /** * Receives a batch of {@link PartitionEvent events} from the Event Hub partition. * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @param partitionId Identifier of the partition to read events from. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to * {@code maximumMessageCount} events. If a stream for the events was opened before, the same position within * that partition is returned. Otherwise, events are read starting from {@link * * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1. */ public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount) { return receive(partitionId, maximumMessageCount, timeout); } /** * Receives a batch of {@link PartitionEvent events} from the Event Hub partition. * * @param partitionId Identifier of the partition to read events from. * @param maximumMessageCount The maximum number of messages to receive in this batch. 
* @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for the * batch; if not specified, the default wait time specified when the consumer was created will be used. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to * {@code maximumMessageCount} events. * * @throws NullPointerException if {@code maximumWaitTime} is null. * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or {@code maximumWaitTime} is * zero or a negative duration. */ public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount, Duration maximumWaitTime) { Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null."); if (maximumMessageCount < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumMessageCount' cannot be less than 1.")); } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumWaitTime' cannot be zero or less.")); } final Flux<PartitionEvent> events = Flux.create(emitter -> { queueWork(partitionId, maximumMessageCount, maximumWaitTime, emitter); }); final Flux<PartitionEvent> map = events.collectList().map(x -> { logger.info("Number of events received: {}", x.size()); return Flux.fromIterable(x); }).block(); return new IterableStream<>(map); } /** * Receives a batch of {@link PartitionEvent events} from all partitions. The batch is returned after * {@code maximumMessageCount} events are received or after the * {@link RetryOptions * * @param maximumMessageCount The maximum number of messages to receive in this batch. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to {@code * maximumMessageCount} events. * * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1. 
*/ public IterableStream<PartitionEvent> receive(int maximumMessageCount) { return receive(maximumMessageCount, timeout); } /** * Receives a batch of {@link PartitionEvent events} from all partitions. The batch is returned after * {@code maximumMessageCount} events are received or after {@code maximumWaitTime} has elapsed. * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for the * batch; if not specified, the default wait time specified when the consumer was created will be used. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to * {@code maximumMessageCount} events. * * @throws NullPointerException if {@code maximumWaitTime} is null. * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or {@code maximumWaitTime} is * zero or a negative duration. */ /** * Given an {@code emitter}, queues that work in {@link SynchronousEventSubscriber}. If the synchronous job has not * been created, will initialise it. 
*/ private void queueWork(String partitionId, int maximumMessageCount, Duration maximumWaitTime, FluxSink<PartitionEvent> emitter) { final long id = idGenerator.getAndIncrement(); final SynchronousReceiveWork work = new SynchronousReceiveWork(id, maximumMessageCount, maximumWaitTime, emitter); final SynchronousEventSubscriber subscriber = openSubscribers.computeIfAbsent(partitionId, key -> { SynchronousEventSubscriber syncSubscriber = new SynchronousEventSubscriber(); if (RECEIVE_ALL_KEY.equals(key)) { logger.info("Started synchronous event subscriber for all partitions"); consumer.receive().subscribeWith(syncSubscriber); } else { logger.info("Started synchronous event subscriber for partition '{}'.", key); consumer.receive(key).subscribeWith(syncSubscriber); } return syncSubscriber; }); logger.info("Queueing work item in SynchronousEventSubscriber."); subscriber.queueReceiveWork(work); } /** * {@inheritDoc} */ @Override public void close() { consumer.close(); } }
class EventHubConsumerClient implements Closeable { private static final String RECEIVE_ALL_KEY = "receive-all"; private final ClientLogger logger = new ClientLogger(EventHubConsumerClient.class); private final ConcurrentHashMap<String, SynchronousEventSubscriber> openSubscribers = new ConcurrentHashMap<>(); private final AtomicLong idGenerator = new AtomicLong(); private final EventHubConsumerAsyncClient consumer; private final Duration timeout; EventHubConsumerClient(EventHubConsumerAsyncClient consumer, Duration tryTimeout) { Objects.requireNonNull(tryTimeout, "'tryTimeout' cannot be null."); this.consumer = Objects.requireNonNull(consumer, "'consumer' cannot be null."); this.timeout = tryTimeout; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with */ public String getFullyQualifiedNamespace() { return consumer.getFullyQualifiedNamespace(); } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return consumer.getEventHubName(); } /** * Gets the position of the event in the partition where the consumer should begin reading. * * @return The position of the event in the partition where the consumer should begin reading. */ public EventPosition getStartingPosition() { return consumer.getStartingPosition(); } /** * Gets the consumer group this consumer is reading events as a part of. * * @return The consumer group this consumer is reading events as a part of. */ public String getConsumerGroup() { return consumer.getConsumerGroup(); } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. 
*/ public EventHubProperties getProperties() { return consumer.getProperties().block(timeout); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public IterableStream<String> getPartitionIds() { return new IterableStream<>(consumer.getPartitionIds()); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. */ public PartitionProperties getPartitionProperties(String partitionId) { return consumer.getPartitionProperties(partitionId).block(timeout); } /** * Receives a batch of {@link PartitionEvent events} from the Event Hub partition. * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @param partitionId Identifier of the partition to read events from. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to * {@code maximumMessageCount} events. If a stream for the events was opened before, the same position within * that partition is returned. Otherwise, events are read starting from {@link * * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1. */ public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount) { return receive(partitionId, maximumMessageCount, timeout); } /** * Receives a batch of {@link PartitionEvent events} from the Event Hub partition. * * @param partitionId Identifier of the partition to read events from. * @param maximumMessageCount The maximum number of messages to receive in this batch. 
* @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for the * batch; if not specified, the default wait time specified when the consumer was created will be used. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to * {@code maximumMessageCount} events. * * @throws NullPointerException if {@code maximumWaitTime} is null. * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or {@code maximumWaitTime} is * zero or a negative duration. */ public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount, Duration maximumWaitTime) { Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null."); if (maximumMessageCount < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumMessageCount' cannot be less than 1.")); } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumWaitTime' cannot be zero or less.")); } final Flux<PartitionEvent> events = Flux.create(emitter -> { queueWork(partitionId, maximumMessageCount, maximumWaitTime, emitter); }); return new IterableStream<>(events); } /** * Receives a batch of {@link PartitionEvent events} from all partitions. The batch is returned after * {@code maximumMessageCount} events are received or after the * {@link RetryOptions * * @param maximumMessageCount The maximum number of messages to receive in this batch. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to * {@code maximumMessageCount} events. * * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1. */ public IterableStream<PartitionEvent> receive(int maximumMessageCount) { return receive(maximumMessageCount, timeout); } /** * Receives a batch of {@link PartitionEvent events} from all partitions. 
The batch is returned after * {@code maximumMessageCount} events are received or after {@code maximumWaitTime} has elapsed. * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for the * batch; if not specified, the default wait time specified when the consumer was created will be used. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to * {@code maximumMessageCount} events. * * @throws NullPointerException if {@code maximumWaitTime} is null. * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or {@code maximumWaitTime} is * zero or a negative duration. */ /** * Given an {@code emitter}, queues that work in {@link SynchronousEventSubscriber}. If the synchronous job has not * been created, will initialise it. */ private void queueWork(String partitionId, int maximumMessageCount, Duration maximumWaitTime, FluxSink<PartitionEvent> emitter) { final long id = idGenerator.getAndIncrement(); final SynchronousReceiveWork work = new SynchronousReceiveWork(id, maximumMessageCount, maximumWaitTime, emitter); final SynchronousEventSubscriber subscriber = openSubscribers.computeIfAbsent(partitionId, key -> { SynchronousEventSubscriber syncSubscriber = new SynchronousEventSubscriber(); if (RECEIVE_ALL_KEY.equals(key)) { logger.info("Started synchronous event subscriber for all partitions"); consumer.receive().subscribeWith(syncSubscriber); } else { logger.info("Started synchronous event subscriber for partition '{}'.", key); consumer.receive(key).subscribeWith(syncSubscriber); } return syncSubscriber; }); logger.info("Queueing work item in SynchronousEventSubscriber."); subscriber.queueReceiveWork(work); } /** * {@inheritDoc} */ @Override public void close() { consumer.close(); } }
The `map` operation can be removed in `receive(String partitionId, int maximumMessageCount, Duration maximumWaitTime)` overload as well.
public IterableStream<PartitionEvent> receive(int maximumMessageCount, Duration maximumWaitTime) { Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null."); if (maximumMessageCount < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumMessageCount' cannot be less than 1.")); } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumWaitTime' cannot be zero or less.")); } final Flux<PartitionEvent> events = Flux.create(emitter -> { queueWork(RECEIVE_ALL_KEY, maximumMessageCount, maximumWaitTime, emitter); }); return new IterableStream<>(events); }
final Flux<PartitionEvent> events = Flux.create(emitter -> {
public IterableStream<PartitionEvent> receive(int maximumMessageCount, Duration maximumWaitTime) { Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null."); if (maximumMessageCount < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumMessageCount' cannot be less than 1.")); } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumWaitTime' cannot be zero or less.")); } final Flux<PartitionEvent> events = Flux.create(emitter -> { queueWork(RECEIVE_ALL_KEY, maximumMessageCount, maximumWaitTime, emitter); }); return new IterableStream<>(events); }
class EventHubConsumerClient implements Closeable { private static final String RECEIVE_ALL_KEY = "receive-all"; private final ClientLogger logger = new ClientLogger(EventHubConsumerClient.class); private final ConcurrentHashMap<String, SynchronousEventSubscriber> openSubscribers = new ConcurrentHashMap<>(); private final AtomicLong idGenerator = new AtomicLong(); private final EventHubConsumerAsyncClient consumer; private final Duration timeout; EventHubConsumerClient(EventHubConsumerAsyncClient consumer, Duration tryTimeout) { Objects.requireNonNull(tryTimeout, "'tryTimeout' cannot be null."); this.consumer = Objects.requireNonNull(consumer, "'consumer' cannot be null."); this.timeout = tryTimeout; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with */ public String getFullyQualifiedNamespace() { return consumer.getFullyQualifiedNamespace(); } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return consumer.getEventHubName(); } /** * Gets the position of the event in the partition where the consumer should begin reading. * * @return The position of the event in the partition where the consumer should begin reading. */ public EventPosition getStartingPosition() { return consumer.getStartingPosition(); } /** * Gets the consumer group this consumer is reading events as a part of. * * @return The consumer group this consumer is reading events as a part of. */ public String getConsumerGroup() { return consumer.getConsumerGroup(); } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. 
*/ public EventHubProperties getProperties() { return consumer.getProperties().block(timeout); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public IterableStream<String> getPartitionIds() { return new IterableStream<>(consumer.getPartitionIds()); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. */ public PartitionProperties getPartitionProperties(String partitionId) { return consumer.getPartitionProperties(partitionId).block(timeout); } /** * Receives a batch of {@link PartitionEvent events} from the Event Hub partition. * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @param partitionId Identifier of the partition to read events from. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to * {@code maximumMessageCount} events. If a stream for the events was opened before, the same position within * that partition is returned. Otherwise, events are read starting from {@link * * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1. */ public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount) { return receive(partitionId, maximumMessageCount, timeout); } /** * Receives a batch of {@link PartitionEvent events} from the Event Hub partition. * * @param partitionId Identifier of the partition to read events from. * @param maximumMessageCount The maximum number of messages to receive in this batch. 
* @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for the * batch; if not specified, the default wait time specified when the consumer was created will be used. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to * {@code maximumMessageCount} events. * * @throws NullPointerException if {@code maximumWaitTime} is null. * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or {@code maximumWaitTime} is * zero or a negative duration. */ public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount, Duration maximumWaitTime) { Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null."); if (maximumMessageCount < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumMessageCount' cannot be less than 1.")); } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumWaitTime' cannot be zero or less.")); } final Flux<PartitionEvent> events = Flux.create(emitter -> { queueWork(partitionId, maximumMessageCount, maximumWaitTime, emitter); }); final Flux<PartitionEvent> map = events.collectList().map(x -> { logger.info("Number of events received: {}", x.size()); return Flux.fromIterable(x); }).block(); return new IterableStream<>(map); } /** * Receives a batch of {@link PartitionEvent events} from all partitions. The batch is returned after * {@code maximumMessageCount} events are received or after the * {@link RetryOptions * * @param maximumMessageCount The maximum number of messages to receive in this batch. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to * {@code maximumMessageCount} events. * * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1. 
*/ public IterableStream<PartitionEvent> receive(int maximumMessageCount) { return receive(maximumMessageCount, timeout); } /** * Receives a batch of {@link PartitionEvent events} from all partitions. The batch is returned after * {@code maximumMessageCount} events are received or after {@code maximumWaitTime} has elapsed. * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for the * batch; if not specified, the default wait time specified when the consumer was created will be used. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to * {@code maximumMessageCount} events. * * @throws NullPointerException if {@code maximumWaitTime} is null. * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or {@code maximumWaitTime} is * zero or a negative duration. */ /** * Given an {@code emitter}, queues that work in {@link SynchronousEventSubscriber}. If the synchronous job has not * been created, will initialise it. 
*/ private void queueWork(String partitionId, int maximumMessageCount, Duration maximumWaitTime, FluxSink<PartitionEvent> emitter) { final long id = idGenerator.getAndIncrement(); final SynchronousReceiveWork work = new SynchronousReceiveWork(id, maximumMessageCount, maximumWaitTime, emitter); final SynchronousEventSubscriber subscriber = openSubscribers.computeIfAbsent(partitionId, key -> { SynchronousEventSubscriber syncSubscriber = new SynchronousEventSubscriber(); if (RECEIVE_ALL_KEY.equals(key)) { logger.info("Started synchronous event subscriber for all partitions"); consumer.receive().subscribeWith(syncSubscriber); } else { logger.info("Started synchronous event subscriber for partition '{}'.", key); consumer.receive(key).subscribeWith(syncSubscriber); } return syncSubscriber; }); logger.info("Queueing work item in SynchronousEventSubscriber."); subscriber.queueReceiveWork(work); } /** * {@inheritDoc} */ @Override public void close() { consumer.close(); } }
class EventHubConsumerClient implements Closeable { private static final String RECEIVE_ALL_KEY = "receive-all"; private final ClientLogger logger = new ClientLogger(EventHubConsumerClient.class); private final ConcurrentHashMap<String, SynchronousEventSubscriber> openSubscribers = new ConcurrentHashMap<>(); private final AtomicLong idGenerator = new AtomicLong(); private final EventHubConsumerAsyncClient consumer; private final Duration timeout; EventHubConsumerClient(EventHubConsumerAsyncClient consumer, Duration tryTimeout) { Objects.requireNonNull(tryTimeout, "'tryTimeout' cannot be null."); this.consumer = Objects.requireNonNull(consumer, "'consumer' cannot be null."); this.timeout = tryTimeout; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with */ public String getFullyQualifiedNamespace() { return consumer.getFullyQualifiedNamespace(); } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return consumer.getEventHubName(); } /** * Gets the position of the event in the partition where the consumer should begin reading. * * @return The position of the event in the partition where the consumer should begin reading. */ public EventPosition getStartingPosition() { return consumer.getStartingPosition(); } /** * Gets the consumer group this consumer is reading events as a part of. * * @return The consumer group this consumer is reading events as a part of. */ public String getConsumerGroup() { return consumer.getConsumerGroup(); } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. 
*/ public EventHubProperties getProperties() { return consumer.getProperties().block(timeout); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public IterableStream<String> getPartitionIds() { return new IterableStream<>(consumer.getPartitionIds()); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. */ public PartitionProperties getPartitionProperties(String partitionId) { return consumer.getPartitionProperties(partitionId).block(timeout); } /** * Receives a batch of {@link PartitionEvent events} from the Event Hub partition. * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @param partitionId Identifier of the partition to read events from. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to * {@code maximumMessageCount} events. If a stream for the events was opened before, the same position within * that partition is returned. Otherwise, events are read starting from {@link * * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1. */ public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount) { return receive(partitionId, maximumMessageCount, timeout); } /** * Receives a batch of {@link PartitionEvent events} from the Event Hub partition. * * @param partitionId Identifier of the partition to read events from. * @param maximumMessageCount The maximum number of messages to receive in this batch. 
* @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for the * batch; if not specified, the default wait time specified when the consumer was created will be used. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to * {@code maximumMessageCount} events. * * @throws NullPointerException if {@code maximumWaitTime} is null. * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or {@code maximumWaitTime} is * zero or a negative duration. */ public IterableStream<PartitionEvent> receive(String partitionId, int maximumMessageCount, Duration maximumWaitTime) { Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null."); if (maximumMessageCount < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumMessageCount' cannot be less than 1.")); } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumWaitTime' cannot be zero or less.")); } final Flux<PartitionEvent> events = Flux.create(emitter -> { queueWork(partitionId, maximumMessageCount, maximumWaitTime, emitter); }); return new IterableStream<>(events); } /** * Receives a batch of {@link PartitionEvent events} from all partitions. The batch is returned after * {@code maximumMessageCount} events are received or after the * {@link RetryOptions * * @param maximumMessageCount The maximum number of messages to receive in this batch. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to * {@code maximumMessageCount} events. * * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1. */ public IterableStream<PartitionEvent> receive(int maximumMessageCount) { return receive(maximumMessageCount, timeout); } /** * Receives a batch of {@link PartitionEvent events} from all partitions. 
The batch is returned after * {@code maximumMessageCount} events are received or after {@code maximumWaitTime} has elapsed. * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for the * batch; if not specified, the default wait time specified when the consumer was created will be used. * * @return A set of {@link PartitionEvent} that was received. The iterable contains up to * {@code maximumMessageCount} events. * * @throws NullPointerException if {@code maximumWaitTime} is null. * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or {@code maximumWaitTime} is * zero or a negative duration. */ /** * Given an {@code emitter}, queues that work in {@link SynchronousEventSubscriber}. If the synchronous job has not * been created, will initialise it. */ private void queueWork(String partitionId, int maximumMessageCount, Duration maximumWaitTime, FluxSink<PartitionEvent> emitter) { final long id = idGenerator.getAndIncrement(); final SynchronousReceiveWork work = new SynchronousReceiveWork(id, maximumMessageCount, maximumWaitTime, emitter); final SynchronousEventSubscriber subscriber = openSubscribers.computeIfAbsent(partitionId, key -> { SynchronousEventSubscriber syncSubscriber = new SynchronousEventSubscriber(); if (RECEIVE_ALL_KEY.equals(key)) { logger.info("Started synchronous event subscriber for all partitions"); consumer.receive().subscribeWith(syncSubscriber); } else { logger.info("Started synchronous event subscriber for partition '{}'.", key); consumer.receive(key).subscribeWith(syncSubscriber); } return syncSubscriber; }); logger.info("Queueing work item in SynchronousEventSubscriber."); subscriber.queueReceiveWork(work); } /** * {@inheritDoc} */ @Override public void close() { consumer.close(); } }
Why do we set this to `null` then pass it into the builder?
public void setup() throws InvalidKeyException, NoSuchAlgorithmException { if (interceptorManager.isPlaybackMode()) { connectionString = "Endpoint=http: tokenCredential = null; String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .credential(tokenCredential) .endpoint(endpoint) .httpClient(interceptorManager.getPlaybackClient()) .buildClient(); } else { connectionString = Configuration.getGlobalConfiguration().get(AZURE_APPCONFIG_CONNECTION_STRING); tokenCredential = new DefaultAzureCredentialBuilder().build(); String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .credential(tokenCredential) .endpoint(endpoint) .addPolicy(interceptorManager.getRecordPolicy()) .buildClient(); } }
tokenCredential = null;
public void setup() throws InvalidKeyException, NoSuchAlgorithmException { if (interceptorManager.isPlaybackMode()) { connectionString = "Endpoint=http: String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .connectionString(connectionString) .endpoint(endpoint) .httpClient(interceptorManager.getPlaybackClient()) .buildClient(); } else { connectionString = Configuration.getGlobalConfiguration().get(AZURE_APPCONFIG_CONNECTION_STRING); tokenCredential = new DefaultAzureCredentialBuilder().build(); String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .credential(tokenCredential) .endpoint(endpoint) .addPolicy(interceptorManager.getRecordPolicy()) .buildClient(); } }
class AadCredentialTest extends TestBase { private static ConfigurationClient client; private static final String AZURE_APPCONFIG_CONNECTION_STRING = "AZURE_APPCONFIG_CONNECTION_STRING"; static String connectionString; static TokenCredential tokenCredential; @BeforeEach @Test public void aadAuthenticationAzConfigClient() { final String key = "newKey"; final String value = "newValue"; ConfigurationSetting addedSetting = client.setConfigurationSetting(key, null, value); Assertions.assertEquals(addedSetting.getKey(), key); Assertions.assertEquals(addedSetting.getValue(), value); } }
class AadCredentialTest extends TestBase { private static ConfigurationClient client; private static final String AZURE_APPCONFIG_CONNECTION_STRING = "AZURE_APPCONFIG_CONNECTION_STRING"; static String connectionString; static TokenCredential tokenCredential; @BeforeEach @Test public void aadAuthenticationAzConfigClient() { final String key = "newKey"; final String value = "newValue"; ConfigurationSetting addedSetting = client.setConfigurationSetting(key, null, value); Assertions.assertEquals(addedSetting.getKey(), key); Assertions.assertEquals(addedSetting.getValue(), value); } }
Same comment as the README, let's talk about accessing the portal when discussing getting endpoint for TokenCredential, at least make it more prominent and talk about it before connection strings.
public static void main(String[] args) { String endpoint = "{endpoint_value}"; TokenCredential tokenCredential = null; final ConfigurationClient client = new ConfigurationClientBuilder() .credential(tokenCredential) .endpoint(endpoint) .buildClient(); final String key = "hello"; final String value = "world"; System.out.println("Beginning of synchronous sample..."); ConfigurationSetting setting = client.setConfigurationSetting(key, null, value); System.out.printf(String.format("[SetConfigurationSetting] Key: %s, Value: %s", setting.getKey(), setting.getValue())); setting = client.getConfigurationSetting(key, null, null); System.out.printf(String.format("[GetConfigurationSetting] Key: %s, Value: %s", setting.getKey(), setting.getValue())); setting = client.deleteConfigurationSetting(key, null); System.out.printf(String.format("[DeleteConfigurationSetting] Key: %s, Value: %s", setting.getKey(), setting.getValue())); System.out.println("End of synchronous sample."); }
public static void main(String[] args) { String endpoint = "{endpoint_value}"; TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); final ConfigurationClient client = new ConfigurationClientBuilder() .credential(tokenCredential) .endpoint(endpoint) .buildClient(); final String key = "hello"; final String value = "world"; System.out.println("Beginning of synchronous sample..."); ConfigurationSetting setting = client.setConfigurationSetting(key, null, value); System.out.printf(String.format("[SetConfigurationSetting] Key: %s, Value: %s", setting.getKey(), setting.getValue())); setting = client.getConfigurationSetting(key, null, null); System.out.printf(String.format("[GetConfigurationSetting] Key: %s, Value: %s", setting.getKey(), setting.getValue())); setting = client.deleteConfigurationSetting(key, null); System.out.printf(String.format("[DeleteConfigurationSetting] Key: %s, Value: %s", setting.getKey(), setting.getValue())); System.out.println("End of synchronous sample."); }
class AadAuthentication { /** * Sample for how to use AAD token Authentication. * * @param args Unused. Arguments to the program. */ }
class AadAuthentication { /** * Sample for how to use AAD token Authentication. * * @param args Unused. Arguments to the program. */ }
Let's use a mutli-line comment for this
public static void main(String[] args) { String endpoint = "{endpoint_value}"; TokenCredential tokenCredential = null; final ConfigurationClient client = new ConfigurationClientBuilder() .credential(tokenCredential) .endpoint(endpoint) .buildClient(); final String key = "hello"; final String value = "world"; System.out.println("Beginning of synchronous sample..."); ConfigurationSetting setting = client.setConfigurationSetting(key, null, value); System.out.printf(String.format("[SetConfigurationSetting] Key: %s, Value: %s", setting.getKey(), setting.getValue())); setting = client.getConfigurationSetting(key, null, null); System.out.printf(String.format("[GetConfigurationSetting] Key: %s, Value: %s", setting.getKey(), setting.getValue())); setting = client.deleteConfigurationSetting(key, null); System.out.printf(String.format("[DeleteConfigurationSetting] Key: %s, Value: %s", setting.getKey(), setting.getValue())); System.out.println("End of synchronous sample."); }
public static void main(String[] args) { String endpoint = "{endpoint_value}"; TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); final ConfigurationClient client = new ConfigurationClientBuilder() .credential(tokenCredential) .endpoint(endpoint) .buildClient(); final String key = "hello"; final String value = "world"; System.out.println("Beginning of synchronous sample..."); ConfigurationSetting setting = client.setConfigurationSetting(key, null, value); System.out.printf(String.format("[SetConfigurationSetting] Key: %s, Value: %s", setting.getKey(), setting.getValue())); setting = client.getConfigurationSetting(key, null, null); System.out.printf(String.format("[GetConfigurationSetting] Key: %s, Value: %s", setting.getKey(), setting.getValue())); setting = client.deleteConfigurationSetting(key, null); System.out.printf(String.format("[DeleteConfigurationSetting] Key: %s, Value: %s", setting.getKey(), setting.getValue())); System.out.println("End of synchronous sample."); }
class AadAuthentication { /** * Sample for how to use AAD token Authentication. * * @param args Unused. Arguments to the program. */ }
class AadAuthentication { /** * Sample for how to use AAD token Authentication. * * @param args Unused. Arguments to the program. */ }
We are mocking the tokenCredentail which is not use anyway. So I think null value will be sufficient
public void setup() throws InvalidKeyException, NoSuchAlgorithmException { if (interceptorManager.isPlaybackMode()) { connectionString = "Endpoint=http: tokenCredential = null; String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .credential(tokenCredential) .endpoint(endpoint) .httpClient(interceptorManager.getPlaybackClient()) .buildClient(); } else { connectionString = Configuration.getGlobalConfiguration().get(AZURE_APPCONFIG_CONNECTION_STRING); tokenCredential = new DefaultAzureCredentialBuilder().build(); String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .credential(tokenCredential) .endpoint(endpoint) .addPolicy(interceptorManager.getRecordPolicy()) .buildClient(); } }
tokenCredential = null;
public void setup() throws InvalidKeyException, NoSuchAlgorithmException { if (interceptorManager.isPlaybackMode()) { connectionString = "Endpoint=http: String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .connectionString(connectionString) .endpoint(endpoint) .httpClient(interceptorManager.getPlaybackClient()) .buildClient(); } else { connectionString = Configuration.getGlobalConfiguration().get(AZURE_APPCONFIG_CONNECTION_STRING); tokenCredential = new DefaultAzureCredentialBuilder().build(); String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .credential(tokenCredential) .endpoint(endpoint) .addPolicy(interceptorManager.getRecordPolicy()) .buildClient(); } }
class AadCredentialTest extends TestBase { private static ConfigurationClient client; private static final String AZURE_APPCONFIG_CONNECTION_STRING = "AZURE_APPCONFIG_CONNECTION_STRING"; static String connectionString; static TokenCredential tokenCredential; @BeforeEach @Test public void aadAuthenticationAzConfigClient() { final String key = "newKey"; final String value = "newValue"; ConfigurationSetting addedSetting = client.setConfigurationSetting(key, null, value); Assertions.assertEquals(addedSetting.getKey(), key); Assertions.assertEquals(addedSetting.getValue(), value); } }
class AadCredentialTest extends TestBase { private static ConfigurationClient client; private static final String AZURE_APPCONFIG_CONNECTION_STRING = "AZURE_APPCONFIG_CONNECTION_STRING"; static String connectionString; static TokenCredential tokenCredential; @BeforeEach @Test public void aadAuthenticationAzConfigClient() { final String key = "newKey"; final String value = "newValue"; ConfigurationSetting addedSetting = client.setConfigurationSetting(key, null, value); Assertions.assertEquals(addedSetting.getKey(), key); Assertions.assertEquals(addedSetting.getValue(), value); } }
Gotcha, didn't notice that this was a sample.
public void setup() throws InvalidKeyException, NoSuchAlgorithmException { if (interceptorManager.isPlaybackMode()) { connectionString = "Endpoint=http: tokenCredential = null; String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .credential(tokenCredential) .endpoint(endpoint) .httpClient(interceptorManager.getPlaybackClient()) .buildClient(); } else { connectionString = Configuration.getGlobalConfiguration().get(AZURE_APPCONFIG_CONNECTION_STRING); tokenCredential = new DefaultAzureCredentialBuilder().build(); String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .credential(tokenCredential) .endpoint(endpoint) .addPolicy(interceptorManager.getRecordPolicy()) .buildClient(); } }
tokenCredential = null;
public void setup() throws InvalidKeyException, NoSuchAlgorithmException { if (interceptorManager.isPlaybackMode()) { connectionString = "Endpoint=http: String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .connectionString(connectionString) .endpoint(endpoint) .httpClient(interceptorManager.getPlaybackClient()) .buildClient(); } else { connectionString = Configuration.getGlobalConfiguration().get(AZURE_APPCONFIG_CONNECTION_STRING); tokenCredential = new DefaultAzureCredentialBuilder().build(); String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .credential(tokenCredential) .endpoint(endpoint) .addPolicy(interceptorManager.getRecordPolicy()) .buildClient(); } }
class AadCredentialTest extends TestBase { private static ConfigurationClient client; private static final String AZURE_APPCONFIG_CONNECTION_STRING = "AZURE_APPCONFIG_CONNECTION_STRING"; static String connectionString; static TokenCredential tokenCredential; @BeforeEach @Test public void aadAuthenticationAzConfigClient() { final String key = "newKey"; final String value = "newValue"; ConfigurationSetting addedSetting = client.setConfigurationSetting(key, null, value); Assertions.assertEquals(addedSetting.getKey(), key); Assertions.assertEquals(addedSetting.getValue(), value); } }
class AadCredentialTest extends TestBase { private static ConfigurationClient client; private static final String AZURE_APPCONFIG_CONNECTION_STRING = "AZURE_APPCONFIG_CONNECTION_STRING"; static String connectionString; static TokenCredential tokenCredential; @BeforeEach @Test public void aadAuthenticationAzConfigClient() { final String key = "newKey"; final String value = "newValue"; ConfigurationSetting addedSetting = client.setConfigurationSetting(key, null, value); Assertions.assertEquals(addedSetting.getKey(), key); Assertions.assertEquals(addedSetting.getValue(), value); } }
```suggestion // In playback mode use connection string because CI environment doesn't set up to support AAD ```
public void setup() throws InvalidKeyException, NoSuchAlgorithmException { if (interceptorManager.isPlaybackMode()) { connectionString = "Endpoint=http: String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .connectionString(connectionString) .endpoint(endpoint) .httpClient(interceptorManager.getPlaybackClient()) .buildClient(); } else { connectionString = Configuration.getGlobalConfiguration().get(AZURE_APPCONFIG_CONNECTION_STRING); tokenCredential = new DefaultAzureCredentialBuilder().build(); String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .credential(tokenCredential) .endpoint(endpoint) .addPolicy(interceptorManager.getRecordPolicy()) .buildClient(); } }
public void setup() throws InvalidKeyException, NoSuchAlgorithmException { if (interceptorManager.isPlaybackMode()) { connectionString = "Endpoint=http: String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .connectionString(connectionString) .endpoint(endpoint) .httpClient(interceptorManager.getPlaybackClient()) .buildClient(); } else { connectionString = Configuration.getGlobalConfiguration().get(AZURE_APPCONFIG_CONNECTION_STRING); tokenCredential = new DefaultAzureCredentialBuilder().build(); String endpoint = new ConfigurationClientCredentials(connectionString).getBaseUri(); client = new ConfigurationClientBuilder() .credential(tokenCredential) .endpoint(endpoint) .addPolicy(interceptorManager.getRecordPolicy()) .buildClient(); } }
class AadCredentialTest extends TestBase { private static ConfigurationClient client; private static final String AZURE_APPCONFIG_CONNECTION_STRING = "AZURE_APPCONFIG_CONNECTION_STRING"; static String connectionString; static TokenCredential tokenCredential; @BeforeEach @Test public void aadAuthenticationAzConfigClient() { final String key = "newKey"; final String value = "newValue"; ConfigurationSetting addedSetting = client.setConfigurationSetting(key, null, value); Assertions.assertEquals(addedSetting.getKey(), key); Assertions.assertEquals(addedSetting.getValue(), value); } }
class AadCredentialTest extends TestBase { private static ConfigurationClient client; private static final String AZURE_APPCONFIG_CONNECTION_STRING = "AZURE_APPCONFIG_CONNECTION_STRING"; static String connectionString; static TokenCredential tokenCredential; @BeforeEach @Test public void aadAuthenticationAzConfigClient() { final String key = "newKey"; final String value = "newValue"; ConfigurationSetting addedSetting = client.setConfigurationSetting(key, null, value); Assertions.assertEquals(addedSetting.getKey(), key); Assertions.assertEquals(addedSetting.getValue(), value); } }
This should be `connection.getFullyQualifiedNamespace()`
String getFullyQualifiedNamespace() { return connection.getFullyQualifiedDomainName(); }
return connection.getFullyQualifiedDomainName();
String getFullyQualifiedNamespace() { return connection.getFullyQualifiedNamespace(); }
class EventHubAsyncClient implements Closeable { private final ClientLogger logger = new ClientLogger(EventHubAsyncClient.class); private final MessageSerializer messageSerializer; private final EventHubConnection connection; private final boolean isSharedConnection; private final EventHubConsumerOptions defaultConsumerOptions; private final TracerProvider tracerProvider; EventHubAsyncClient(EventHubConnection connection, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.connection = Objects.requireNonNull(connection, "'connection' cannot be null."); this.isSharedConnection = isSharedConnection; this.defaultConsumerOptions = new EventHubConsumerOptions(); } /** * Gets the fully qualified namespace of this Event Hub. * * @return The fully qualified namespace of this Event Hub. */ /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ String getEventHubName() { return connection.getEventHubName(); } /** * Gets information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ Mono<EventHubProperties> getProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. 
*/ Flux<String> getPartitionIds() { return getProperties().flatMapMany(properties -> Flux.fromArray(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. * * @return A new {@link EventHubProducerAsyncClient}. */ EventHubProducerAsyncClient createProducer() { return new EventHubProducerAsyncClient(connection.getFullyQualifiedDomainName(), getEventHubName(), connection, connection.getRetryOptions(), tracerProvider, messageSerializer, isSharedConnection); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the specified consumer group, and begins reading events from the {@code eventPosition}. * * The consumer created is non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers". * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the * context of this group. The name of the consumer group that is created by default is {@link * EventHubClientBuilder * @param eventPosition The position within the partition where the consumer should begin reading events. 
* @return A new {@link EventHubConsumerAsyncClient} that receives events from the partition at the given position. * @throws NullPointerException If {@code eventPosition}, or {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is {@code null} or an empty * string. */ EventHubConsumerAsyncClient createConsumer(String consumerGroup, EventPosition eventPosition) { return createConsumer(consumerGroup, eventPosition, defaultConsumerOptions); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the configured consumer group, and begins reading events from the specified {@code eventPosition}. * * <p> * A consumer may be exclusive, which asserts ownership over the partition for the consumer group to ensure that * only one consumer from that group is reading the from the partition. These exclusive consumers are sometimes * referred to as "Epoch Consumers." * * A consumer may also be non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers." * * Designating a consumer as exclusive may be specified in the {@code options}, by setting {@link * EventHubConsumerOptions * non-exclusive. * </p> * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the * context of this group. The name of the consumer group that is created by default is * {@link EventHubClientBuilder * @param eventPosition The position within the partition where the consumer should begin reading events. * @param options The set of options to apply when creating the consumer. * @return An new {@link EventHubConsumerAsyncClient} that receives events from the partition with all configured * {@link EventHubConsumerOptions}. 
* @throws NullPointerException If {@code eventPosition}, {@code consumerGroup}, {@code partitionId}, or * {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is an empty string. */ EventHubConsumerAsyncClient createConsumer(String consumerGroup, EventPosition eventPosition, EventHubConsumerOptions options) { Objects.requireNonNull(eventPosition, "'eventPosition' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); Objects.requireNonNull(consumerGroup, "'consumerGroup' cannot be null."); if (consumerGroup.isEmpty()) { throw logger.logExceptionAsError( new IllegalArgumentException("'consumerGroup' cannot be an empty string.")); } final EventHubConsumerOptions clonedOptions = options.clone(); return new EventHubConsumerAsyncClient(connection.getFullyQualifiedDomainName(), getEventHubName(), connection, messageSerializer, consumerGroup, eventPosition, clonedOptions, isSharedConnection); } /** * Closes and disposes of connection to service. Any {@link EventHubConsumerAsyncClient EventHubConsumers} and * {@link EventHubProducerAsyncClient EventHubProducers} created with this instance will have their connections * closed. */ @Override public void close() { connection.close(); } }
class EventHubAsyncClient implements Closeable { private final ClientLogger logger = new ClientLogger(EventHubAsyncClient.class); private final MessageSerializer messageSerializer; private final EventHubConnection connection; private final boolean isSharedConnection; private final EventHubConsumerOptions defaultConsumerOptions; private final TracerProvider tracerProvider; EventHubAsyncClient(EventHubConnection connection, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.connection = Objects.requireNonNull(connection, "'connection' cannot be null."); this.isSharedConnection = isSharedConnection; this.defaultConsumerOptions = new EventHubConsumerOptions(); } /** * Gets the fully qualified namespace of this Event Hub. * * @return The fully qualified namespace of this Event Hub. */ /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ String getEventHubName() { return connection.getEventHubName(); } /** * Gets information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ Mono<EventHubProperties> getProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. 
*/ Flux<String> getPartitionIds() { return getProperties().flatMapMany(properties -> Flux.fromArray(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. * * @return A new {@link EventHubProducerAsyncClient}. */ EventHubProducerAsyncClient createProducer() { return new EventHubProducerAsyncClient(connection.getFullyQualifiedNamespace(), getEventHubName(), connection, connection.getRetryOptions(), tracerProvider, messageSerializer, isSharedConnection); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the specified consumer group, and begins reading events from the {@code eventPosition}. * * The consumer created is non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers". * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the * context of this group. The name of the consumer group that is created by default is {@link * EventHubClientBuilder * @param eventPosition The position within the partition where the consumer should begin reading events. 
* @return A new {@link EventHubConsumerAsyncClient} that receives events from the partition at the given position. * @throws NullPointerException If {@code eventPosition}, or {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is {@code null} or an empty * string. */ EventHubConsumerAsyncClient createConsumer(String consumerGroup, EventPosition eventPosition) { return createConsumer(consumerGroup, eventPosition, defaultConsumerOptions); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the configured consumer group, and begins reading events from the specified {@code eventPosition}. * * <p> * A consumer may be exclusive, which asserts ownership over the partition for the consumer group to ensure that * only one consumer from that group is reading the from the partition. These exclusive consumers are sometimes * referred to as "Epoch Consumers." * * A consumer may also be non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers." * * Designating a consumer as exclusive may be specified in the {@code options}, by setting {@link * EventHubConsumerOptions * non-exclusive. * </p> * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the * context of this group. The name of the consumer group that is created by default is * {@link EventHubClientBuilder * @param eventPosition The position within the partition where the consumer should begin reading events. * @param options The set of options to apply when creating the consumer. * @return An new {@link EventHubConsumerAsyncClient} that receives events from the partition with all configured * {@link EventHubConsumerOptions}. 
* @throws NullPointerException If {@code eventPosition}, {@code consumerGroup}, {@code partitionId}, or * {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is an empty string. */ EventHubConsumerAsyncClient createConsumer(String consumerGroup, EventPosition eventPosition, EventHubConsumerOptions options) { Objects.requireNonNull(eventPosition, "'eventPosition' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); Objects.requireNonNull(consumerGroup, "'consumerGroup' cannot be null."); if (consumerGroup.isEmpty()) { throw logger.logExceptionAsError( new IllegalArgumentException("'consumerGroup' cannot be an empty string.")); } final EventHubConsumerOptions clonedOptions = options.clone(); return new EventHubConsumerAsyncClient(connection.getFullyQualifiedNamespace(), getEventHubName(), connection, messageSerializer, consumerGroup, eventPosition, clonedOptions, isSharedConnection); } /** * Closes and disposes of connection to service. Any {@link EventHubConsumerAsyncClient EventHubConsumers} and * {@link EventHubProducerAsyncClient EventHubProducers} created with this instance will have their connections * closed. */ @Override public void close() { connection.close(); } }
Why isn't this just called CertificateIssuer~~Set~~Parameters ?
Mono<Response<CertificateIssuer>> createIssuerWithResponse(CertificateIssuer issuer, Context context) { CertificateIssuerSetParameters parameters = new CertificateIssuerSetParameters() .provider(issuer.getProperties().getProvider()) .credentials(new IssuerCredentials().accountId(issuer.getAccountId()).password(issuer.getPassword())) .organizationDetails(new OrganizationDetails().adminDetails(issuer.getAdministratorContacts())) .credentials(new IssuerCredentials().password(issuer.getPassword()).accountId(issuer.getAccountId())); return service.setCertificateIssuer(vaultUrl, API_VERSION, ACCEPT_LANGUAGE, issuer.getName(), parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Creating certificate issuer - {}", issuer.getName())) .doOnSuccess(response -> logger.info("Created the certificate issuer - {}", response.getValue().getName())) .doOnError(error -> logger.warning("Failed to create the certificate issuer - {}", issuer.getName(), error)); }
CertificateIssuerSetParameters parameters = new CertificateIssuerSetParameters()
new CertificateIssuerSetParameters() .provider(provider); return service.setCertificateIssuer(vaultUrl, API_VERSION, ACCEPT_LANGUAGE, name, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Creating certificate issuer - {}
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private final String vaultUrl; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param vaultUrl URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link CertificateServiceVersion} of the service to be used when making requests. */ CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ public String getVaultUrl() { return vaultUrl; } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param enabled The enabled status for the certificate. * @param tags The application specific metadata to set. 
* @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name, policy, enabled, tags), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) { return (pollingContext, firstResponse) -> withContext(context -> cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono); } private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return (pollingContext) -> withContext(context -> createCertificateWithResponse(name, policy, enabled, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue()))); } private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) { return (pollingContext) -> withContext(context -> getCertificateWithResponse(name, "", context) .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. 
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean 
enabled, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .certificateAttributes(new CertificateRequestAttributes().enabled(enabled)) .tags(tags); return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) { return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. 
This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. 
The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) { Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null."); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateProperties.getTags()) .certificateAttributes(new CertificateRequestAttributes(certificateProperties)); return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", 
certificateProperties.getName())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error)); } /** * Gets information about the certificate which represents the {@link CertificateProperties} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateProperties} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateProperties}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param certificateProperties The {@link CertificateProperties} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; static final String KEY_VAULT_SCOPE = "https: private final String vaultUrl; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param vaultUrl URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link CertificateServiceVersion} of the service to be used when making requests. */ CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ public String getVaultUrl() { return vaultUrl; } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginCreateCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param enabled The enabled status for the certificate. 
* @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name, policy, enabled, tags), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) { return (pollingContext, firstResponse) -> withContext(context -> cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono); } private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return (pollingContext) -> withContext(context -> createCertificateWithResponse(name, policy, enabled, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue()))); } private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) { return (pollingContext) -> withContext(context -> getCertificateWithResponse(name, "", context) .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. 
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginCreateCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: status = LongRunningOperationStatus.fromString(certificateOperationResponse.getValue().getStatus(), true); break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } 
Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean enabled, Map<String, String> tags, Context context) {
    // Build the create-certificate request body from the supplied policy, enabled flag and tags.
    CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters()
        .certificatePolicy(new CertificatePolicyRequest(certificatePolicy))
        .certificateAttributes(new CertificateRequestAttributes().enabled(enabled))
        .tags(tags);
    return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context);
}

/**
 * Gets a pending {@link CertificateOperation} from the key vault. This operation requires the certificates/get permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Get a pending certificate operation. The {@link PollerFlux poller} allows users to automatically poll on the certificate
 * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateOperation
 *
 * @param name The name of the certificate.
 * @throws ResourceNotFoundException when a certificate operation for a certificate with {@code name} doesn't exist.
 * @return A {@link PollerFlux} polling on the certificate operation status.
 */
public PollerFlux<CertificateOperation, KeyVaultCertificate> getCertificateOperation(String name) {
    // No activation step is needed: the operation already exists service-side, so the activation
    // function is an empty Mono and polling begins immediately.
    return new PollerFlux<>(Duration.ofSeconds(1),
        (pollingContext) -> Mono.empty(),
        createPollOperation(name),
        cancelOperation(name),
        fetchResultOperation(name));
}

/**
 * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Gets a specific version of the certificate in the key vault.
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) { return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. 
The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) { Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null."); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateProperties.getTags()) .certificateAttributes(new CertificateRequestAttributes(certificateProperties)); return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", 
certificateProperties.getName())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error)); } /** * Deletes a certificate from a specified key vault. All the versions of the certificate along with its associated policy * get deleted. If soft-delete is enabled on the key vault then the certificate is placed in the deleted state and requires to be * purged for permanent deletion else the certificate is permanently deleted. The delete operation applies to any certificate stored in * Azure Key Vault but it cannot be applied to an individual version of a certificate. This operation requires the certificates/delete permission. * * <p><strong>Code Samples</strong></p> * <p>Deletes the certificate in the Azure Key Vault. Prints out the deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginDeleteCertificate * * @param name The name of the certificate to be deleted. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link PollerFlux} to poll on the {@link DeletedCertificate deleted certificate}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<DeletedCertificate, Void> beginDeleteCertificate(String name) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name), createDeletePollOperation(name), (context, firstResponse) -> Mono.empty(), (context) -> Mono.empty()); } private Function<PollingContext<DeletedCertificate>, Mono<DeletedCertificate>> activationOperation(String name) { return (pollingContext) -> withContext(context -> deleteCertificateWithResponse(name, context) .flatMap(deletedCertificateResponse -> Mono.just(deletedCertificateResponse.getValue()))); } /* Polling operation to poll on create delete certificate operation status. */ private Function<PollingContext<DeletedCertificate>, Mono<PollResponse<DeletedCertificate>>> createDeletePollOperation(String keyName) { return pollingContext -> withContext(context -> service.getDeletedCertificatePoller(vaultUrl, keyName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(deletedCertificateResponse -> { if (deletedCertificateResponse.getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, pollingContext.getLatestResponse().getValue()))); } return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, deletedCertificateResponse.getValue()))); })) .onErrorReturn(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollingContext.getLatestResponse().getValue())); } Mono<Response<DeletedCertificate>> deleteCertificateWithResponse(String name, Context context) { return service.deleteCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Deleting certificate - {}", name)) .doOnSuccess(response -> logger.info("Deleted the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to 
delete the certificate - {}", name, error)); } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. Prints out the * deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificate * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing the {@link DeletedCertificate deleted certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DeletedCertificate> getDeletedCertificate(String name) { try { return withContext(context -> getDeletedCertificateWithResponse(name, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. 
Prints out the * deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name) { try { return withContext(context -> getDeletedCertificateWithResponse(name, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name, Context context) { return service.getDeletedCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving deleted certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the deleted certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the deleted certificate - {}", name, error)); } /** * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission. * * <p><strong>Code Samples</strong></p> * <p>Purges the deleted certificate from the key vault enabled for soft-delete. 
Prints out the * status code from the server response when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return An empty {@link Mono}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> purgeDeletedCertificate(String name) { try { return purgeDeletedCertificateWithResponse(name).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission. * * <p><strong>Code Samples</strong></p> * <p>Purges the deleted certificate from the key vault enabled for soft-delete. Prints out the * status code from the server response when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing a Void Response}
internal class, made to match the REST API
Mono<Response<CertificateIssuer>> createIssuerWithResponse(CertificateIssuer issuer, Context context) {
        // Assembles the REST request body from the public CertificateIssuer model.
        // FIX: .credentials(...) was previously called twice with equivalent account-id/password values;
        // the second call simply overwrote the first, so the duplicate has been removed.
        CertificateIssuerSetParameters parameters = new CertificateIssuerSetParameters()
            .provider(issuer.getProperties().getProvider())
            .credentials(new IssuerCredentials()
                .accountId(issuer.getAccountId())
                .password(issuer.getPassword()))
            .organizationDetails(new OrganizationDetails()
                .adminDetails(issuer.getAdministratorContacts()));
        return service.setCertificateIssuer(vaultUrl, API_VERSION, ACCEPT_LANGUAGE, issuer.getName(), parameters,
                CONTENT_TYPE_HEADER_VALUE, context)
            .doOnRequest(ignored -> logger.info("Creating certificate issuer - {}", issuer.getName()))
            .doOnSuccess(response -> logger.info("Created the certificate issuer - {}",
                response.getValue().getName()))
            .doOnError(error -> logger.warning("Failed to create the certificate issuer - {}",
                issuer.getName(), error));
    }
CertificateIssuerSetParameters parameters = new CertificateIssuerSetParameters()
new CertificateIssuerSetParameters() .provider(provider); return service.setCertificateIssuer(vaultUrl, API_VERSION, ACCEPT_LANGUAGE, name, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Creating certificate issuer - {}
class CertificateAsyncClient {
    static final String API_VERSION = "7.0";
    static final String ACCEPT_LANGUAGE = "en-US";
    static final int DEFAULT_MAX_PAGE_RESULTS = 25;
    static final String CONTENT_TYPE_HEADER_VALUE = "application/json";

    private final String vaultUrl;
    private final CertificateService service;
    private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class);

    /**
     * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests.
     *
     * @param vaultUrl URL for the Azure KeyVault service.
     * @param pipeline HttpPipeline that the HTTP requests and responses flow through.
     * @param version {@link CertificateServiceVersion} of the service to be used when making requests.
     */
    CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) {
        Objects.requireNonNull(vaultUrl,
            KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED));
        this.vaultUrl = vaultUrl.toString();
        // NOTE(review): the 'version' parameter is accepted but not used here; requests are pinned to the
        // API_VERSION constant -- confirm whether 'version' should drive the api-version instead.
        this.service = RestProxy.create(CertificateService.class, pipeline);
    }

    /**
     * Get the vault endpoint url to which service requests are sent to.
     * @return the vault endpoint url
     */
    public String getVaultUrl() {
        return vaultUrl;
    }

    /**
     * Creates a new certificate. If this is the first version, the certificate resource is created. This operation
     * requires the certificates/create permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to
     * automatically poll on the create certificate operation status. It is possible to monitor each intermediate
     * poll response during the poll operation.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate}
     *
     * @param name The name of the certificate to be created.
     * @param policy The policy of the certificate to be created.
     * @param enabled The enabled status for the certificate.
     * @param tags The application specific metadata to set.
* @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name, policy, enabled, tags), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) { return (pollingContext, firstResponse) -> withContext(context -> cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono); } private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return (pollingContext) -> withContext(context -> createCertificateWithResponse(name, policy, enabled, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue()))); } private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) { return (pollingContext) -> withContext(context -> getCertificateWithResponse(name, "", context) .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. 
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean 
enabled, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .certificateAttributes(new CertificateRequestAttributes().enabled(enabled)) .tags(tags); return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) { return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. 
This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. 
The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) { Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null."); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateProperties.getTags()) .certificateAttributes(new CertificateRequestAttributes(certificateProperties)); return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", 
certificateProperties.getName())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error)); } /** * Gets information about the certificate which represents the {@link CertificateProperties} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateProperties} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateProperties}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param certificateProperties The {@link CertificateProperties} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; static final String KEY_VAULT_SCOPE = "https: private final String vaultUrl; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param vaultUrl URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link CertificateServiceVersion} of the service to be used when making requests. */ CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ public String getVaultUrl() { return vaultUrl; } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginCreateCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param enabled The enabled status for the certificate. 
* @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name, policy, enabled, tags), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) { return (pollingContext, firstResponse) -> withContext(context -> cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono); } private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return (pollingContext) -> withContext(context -> createCertificateWithResponse(name, policy, enabled, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue()))); } private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) { return (pollingContext) -> withContext(context -> getCertificateWithResponse(name, "", context) .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. 
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginCreateCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: status = LongRunningOperationStatus.fromString(certificateOperationResponse.getValue().getStatus(), true); break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } 
Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean enabled, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .certificateAttributes(new CertificateRequestAttributes().enabled(enabled)) .tags(tags); return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets a pending {@link CertificateOperation} from the key vault. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Get a pending certificate operation. The {@link PollerFlux poller} allows users to automatically poll on the certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateOperation * * @param name The name of the certificate. * @throws ResourceNotFoundException when a certificate operation for a certificate with {@code name} doesn't exist. * @return A {@link PollerFlux} polling on the certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> getCertificateOperation(String name) { return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) { return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. 
The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) { Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null."); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateProperties.getTags()) .certificateAttributes(new CertificateRequestAttributes(certificateProperties)); return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", 
certificateProperties.getName())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error)); } /** * Deletes a certificate from a specified key vault. All the versions of the certificate along with its associated policy * get deleted. If soft-delete is enabled on the key vault then the certificate is placed in the deleted state and requires to be * purged for permanent deletion else the certificate is permanently deleted. The delete operation applies to any certificate stored in * Azure Key Vault but it cannot be applied to an individual version of a certificate. This operation requires the certificates/delete permission. * * <p><strong>Code Samples</strong></p> * <p>Deletes the certificate in the Azure Key Vault. Prints out the deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginDeleteCertificate * * @param name The name of the certificate to be deleted. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link PollerFlux} to poll on the {@link DeletedCertificate deleted certificate}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<DeletedCertificate, Void> beginDeleteCertificate(String name) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name), createDeletePollOperation(name), (context, firstResponse) -> Mono.empty(), (context) -> Mono.empty()); } private Function<PollingContext<DeletedCertificate>, Mono<DeletedCertificate>> activationOperation(String name) { return (pollingContext) -> withContext(context -> deleteCertificateWithResponse(name, context) .flatMap(deletedCertificateResponse -> Mono.just(deletedCertificateResponse.getValue()))); } /* Polling operation to poll on create delete certificate operation status. */ private Function<PollingContext<DeletedCertificate>, Mono<PollResponse<DeletedCertificate>>> createDeletePollOperation(String keyName) { return pollingContext -> withContext(context -> service.getDeletedCertificatePoller(vaultUrl, keyName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(deletedCertificateResponse -> { if (deletedCertificateResponse.getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, pollingContext.getLatestResponse().getValue()))); } return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, deletedCertificateResponse.getValue()))); })) .onErrorReturn(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollingContext.getLatestResponse().getValue())); } Mono<Response<DeletedCertificate>> deleteCertificateWithResponse(String name, Context context) { return service.deleteCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Deleting certificate - {}", name)) .doOnSuccess(response -> logger.info("Deleted the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to 
delete the certificate - {}", name, error)); } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. Prints out the * deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificate * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing the {@link DeletedCertificate deleted certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DeletedCertificate> getDeletedCertificate(String name) { try { return withContext(context -> getDeletedCertificateWithResponse(name, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. 
Prints out the * deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name) { try { return withContext(context -> getDeletedCertificateWithResponse(name, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name, Context context) { return service.getDeletedCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving deleted certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the deleted certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the deleted certificate - {}", name, error)); } /** * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission. * * <p><strong>Code Samples</strong></p> * <p>Purges the deleted certificate from the key vault enabled for soft-delete. 
Prints out the * status code from the server response when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return An empty {@link Mono}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> purgeDeletedCertificate(String name) { try { return purgeDeletedCertificateWithResponse(name).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission. * * <p><strong>Code Samples</strong></p> * <p>Purges the deleted certificate from the key vault enabled for soft-delete. Prints out the * status code from the server response when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing a Void Response}
code snippets should be renamed to beginCreateCertificate
Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean enabled, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .certificateAttributes(new CertificateRequestAttributes().enabled(enabled)) .tags(tags); return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); }
return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context);
new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .certificateAttributes(new CertificateRequestAttributes().enabled(enabled)) .tags(tags); return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets a pending {@link CertificateOperation}
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private final String vaultUrl; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param vaultUrl URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link CertificateServiceVersion} of the service to be used when making requests. */ CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ public String getVaultUrl() { return vaultUrl; } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param enabled The enabled status for the certificate. * @param tags The application specific metadata to set. 
* @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name, policy, enabled, tags), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) { return (pollingContext, firstResponse) -> withContext(context -> cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono); } private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return (pollingContext) -> withContext(context -> createCertificateWithResponse(name, policy, enabled, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue()))); } private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) { return (pollingContext) -> withContext(context -> getCertificateWithResponse(name, "", context) .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. 
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean 
enabled, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = from the key vault. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Geta a pending certificate operation. The {@link PollerFlux poller} allows users to automatically poll on the certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * @param name The name of the certificate. * @throws ResourceNotFoundException when a certificate operation for a certificate with {@code name} doesn't exist. * @return A {@link PollerFlux} polling on the certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> getCertificateOperation(String name) { return new PollerFlux<>(Duration.ofSeconds(1), null, createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) { 
return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) { Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null."); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateProperties.getTags()) .certificateAttributes(new CertificateRequestAttributes(certificateProperties)); return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateProperties.getName())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error)); } /** * Deletes a certificate from a specified key vault. All the versions of the certificate along with its associated policy * get deleted. If soft-delete is enabled on the key vault then the certificate is placed in the deleted state and requires to be * purged for permanent deletion else the certificate is permanently deleted. The delete operation applies to any certificate stored in * Azure Key Vault but it cannot be applied to an individual version of a certificate. 
This operation requires the certificates/delete permission. * * <p><strong>Code Samples</strong></p> * <p>Deletes the certificate in the Azure Key Vault. Prints out the deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginDeleteCertificate * * @param name The name of the certificate to be deleted. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link PollerFlux} to poll on the {@link DeletedCertificate deleted certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<DeletedCertificate, Void> beginDeleteCertificate(String name) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name), createDeletePollOperation(name), (context, firstResponse) -> Mono.empty(), (context) -> Mono.empty()); } private Function<PollingContext<DeletedCertificate>, Mono<DeletedCertificate>> activationOperation(String name) { return (pollingContext) -> withContext(context -> deleteCertificateWithResponse(name, context) .flatMap(deletedCertificateResponse -> Mono.just(deletedCertificateResponse.getValue()))); } /* Polling operation to poll on create delete certificate operation status. 
*/ private Function<PollingContext<DeletedCertificate>, Mono<PollResponse<DeletedCertificate>>> createDeletePollOperation(String keyName) { return pollingContext -> withContext(context -> service.getDeletedCertificatePoller(vaultUrl, keyName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(deletedCertificateResponse -> { if (deletedCertificateResponse.getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, pollingContext.getLatestResponse().getValue()))); } return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, deletedCertificateResponse.getValue()))); })) .onErrorReturn(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollingContext.getLatestResponse().getValue())); } Mono<Response<DeletedCertificate>> deleteCertificateWithResponse(String name, Context context) { return service.deleteCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Deleting certificate - {}", name)) .doOnSuccess(response -> logger.info("Deleted the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to delete the certificate - {}", name, error)); } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. 
Prints out the * deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificate * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing the {@link DeletedCertificate deleted certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DeletedCertificate> getDeletedCertificate(String name) { try { return withContext(context -> getDeletedCertificateWithResponse(name, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. Prints out the * deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name) { try { return withContext(context -> getDeletedCertificateWithResponse(name, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name, Context context) { return service.getDeletedCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving deleted certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the deleted certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the deleted certificate - {}", name, error)); } /** * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission. * * <p><strong>Code Samples</strong></p> * <p>Purges the deleted certificate from the key vault enabled for soft-delete. Prints out the * status code from the server response when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return An empty {@link Mono}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> purgeDeletedCertificate(String name) { try { return purgeDeletedCertificateWithResponse(name).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission. * * <p><strong>Code Samples</strong></p> * <p>Purges the deleted certificate from the key vault enabled for soft-delete. Prints out the * status code from the server response when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing a Void Response}
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; static final String KEY_VAULT_SCOPE = "https: private final String vaultUrl; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param vaultUrl URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link CertificateServiceVersion} of the service to be used when making requests. */ CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ public String getVaultUrl() { return vaultUrl; } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginCreateCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param enabled The enabled status for the certificate. 
* @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name, policy, enabled, tags), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) { return (pollingContext, firstResponse) -> withContext(context -> cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono); } private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return (pollingContext) -> withContext(context -> createCertificateWithResponse(name, policy, enabled, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue()))); } private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) { return (pollingContext) -> withContext(context -> getCertificateWithResponse(name, "", context) .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. 
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginCreateCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: status = LongRunningOperationStatus.fromString(certificateOperationResponse.getValue().getStatus(), true); break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } 
Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean enabled, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = from the key vault. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Get a pending certificate operation. The {@link PollerFlux poller} allows users to automatically poll on the certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateOperation * * @param name The name of the certificate. * @throws ResourceNotFoundException when a certificate operation for a certificate with {@code name} doesn't exist. * @return A {@link PollerFlux} polling on the certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> getCertificateOperation(String name) { return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}. 
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) {
        try {
            // Empty version string means "latest version" on the service side.
            return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            // Surface synchronous failures as an error Mono rather than throwing to the caller.
            return monoError(logger, ex);
        }
    }

    /**
     * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Gets a specific version of the certificate in the key vault. Prints out the
     * returned certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse}
     *
     * @param name The name of the certificate to retrieve, cannot be null
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException if {@code name} is empty string.
     * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains the
     *     requested {@link KeyVaultCertificateWithPolicy certificate}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) {
        try {
            return withContext(context -> getCertificateWithResponse(name, "", context));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    // Service-layer implementation: fetches certificate + policy and logs request/success/failure.
    Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) {
        return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
            .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name))
            .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName()))
            .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error));
    }

    // Service-layer implementation: fetches a specific certificate version (no policy).
    Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) {
        return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
            .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name))
            .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName()))
            .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error));
    }

    /**
     * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Gets a specific version of the certificate in the key vault. Prints out the
     * returned certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse}
     *
     * @param name The name of the certificate to retrieve, cannot be null
     * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved.
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException if {@code name} is empty string.
     * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains the
     *     requested {@link KeyVaultCertificate certificate}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) {
        try {
            // Normalize null version to "" so the service treats it as "latest".
            return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Gets a specific version of the certificate in the key vault. Prints out the
     * returned certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion}
     *
     * @param name The name of the certificate to retrieve, cannot be null
     * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved.
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException if {@code name} is empty string.
     * @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) {
        try {
            return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing
     * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the
     * returned certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties}
     *
     * @param certificateProperties The {@link CertificateProperties} object with updated properties.
     * @throws NullPointerException if {@code certificate} is {@code null}.
* @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) { Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null."); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateProperties.getTags()) .certificateAttributes(new CertificateRequestAttributes(certificateProperties)); return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateProperties.getName())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error)); } /** * Deletes a certificate from a specified key vault. All the versions of the certificate along with its associated policy * get deleted. If soft-delete is enabled on the key vault then the certificate is placed in the deleted state and requires to be * purged for permanent deletion else the certificate is permanently deleted. The delete operation applies to any certificate stored in * Azure Key Vault but it cannot be applied to an individual version of a certificate. 
 This operation requires the certificates/delete permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Deletes the certificate in the Azure Key Vault. Prints out the deleted certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginDeleteCertificate}
     *
     * @param name The name of the certificate to be deleted.
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException when a certificate with {@code name} is empty string.
     * @return A {@link PollerFlux} to poll on the {@link DeletedCertificate deleted certificate}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public PollerFlux<DeletedCertificate, Void> beginDeleteCertificate(String name) {
        // Delete cannot be cancelled and has no final result to fetch, hence the Mono.empty() callbacks.
        return new PollerFlux<>(Duration.ofSeconds(1),
            activationOperation(name),
            createDeletePollOperation(name),
            (context, firstResponse) -> Mono.empty(),
            (context) -> Mono.empty());
    }

    // Activation callback: issues the delete request and yields the initial DeletedCertificate.
    private Function<PollingContext<DeletedCertificate>, Mono<DeletedCertificate>> activationOperation(String name) {
        return (pollingContext) -> withContext(context -> deleteCertificateWithResponse(name, context)
            .flatMap(deletedCertificateResponse -> Mono.just(deletedCertificateResponse.getValue())));
    }

    /*
       Polling operation to poll on create delete certificate operation status.
     */
    private Function<PollingContext<DeletedCertificate>, Mono<PollResponse<DeletedCertificate>>> createDeletePollOperation(String keyName) {
        return pollingContext ->
            withContext(context -> service.getDeletedCertificatePoller(vaultUrl, keyName, API_VERSION,
                ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
                .flatMap(deletedCertificateResponse -> {
                    // 404 means the deletion hasn't materialized on the service yet: keep polling,
                    // re-emitting the last known value.
                    if (deletedCertificateResponse.getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) {
                        return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS,
                            pollingContext.getLatestResponse().getValue())));
                    }
                    return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
                        deletedCertificateResponse.getValue())));
                }))
                // Best-effort: any transport/service error terminates polling as completed with the
                // last known value rather than failing the poller (e.g. vaults without soft-delete).
                .onErrorReturn(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
                    pollingContext.getLatestResponse().getValue()));
    }

    // Service-layer implementation: DELETEs the certificate and logs request/success/failure.
    Mono<Response<DeletedCertificate>> deleteCertificateWithResponse(String name, Context context) {
        return service.deleteCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
            .doOnRequest(ignored -> logger.info("Deleting certificate - {}", name))
            .doOnSuccess(response -> logger.info("Deleted the certificate - {}", response.getValue().getProperties().getName()))
            .doOnError(error -> logger.warning("Failed to delete the certificate - {}", name, error));
    }

    /**
     * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete
     * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation
     * requires the certificates/get permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p> Gets the deleted certificate from the key vault enabled for soft-delete.
 Prints out the
     * deleted certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificate}
     *
     * @param name The name of the deleted certificate.
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException when a certificate with {@code name} is empty string.
     * @return A {@link Mono} containing the {@link DeletedCertificate deleted certificate}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<DeletedCertificate> getDeletedCertificate(String name) {
        try {
            return withContext(context -> getDeletedCertificateWithResponse(name, context)).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            // Surface synchronous failures as an error Mono rather than throwing to the caller.
            return monoError(logger, ex);
        }
    }

    /**
     * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete
     * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation
     * requires the certificates/get permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p> Gets the deleted certificate from the key vault enabled for soft-delete. Prints out the
     * deleted certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificateWithResponse}
     *
     * @param name The name of the deleted certificate.
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException when a certificate with {@code name} is empty string.
     * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains the
     *     {@link DeletedCertificate deleted certificate}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name) {
        try {
            return withContext(context -> getDeletedCertificateWithResponse(name, context));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    // Service-layer implementation: GETs the deleted certificate and logs request/success/failure.
    Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name, Context context) {
        return service.getDeletedCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
            .doOnRequest(ignored -> logger.info("Retrieving deleted certificate - {}", name))
            .doOnSuccess(response -> logger.info("Retrieved the deleted certificate - {}", response.getValue().getProperties().getName()))
            .doOnError(error -> logger.warning("Failed to Retrieve the deleted certificate - {}", name, error));
    }

    /**
     * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for
     * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Purges the deleted certificate from the key vault enabled for soft-delete. Prints out the
     * status code from the server response when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse}
     *
     * @param name The name of the deleted certificate.
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException when a certificate with {@code name} is empty string.
     * @return An empty {@link Mono}.
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> purgeDeletedCertificate(String name) { try { return purgeDeletedCertificateWithResponse(name).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission. * * <p><strong>Code Samples</strong></p> * <p>Purges the deleted certificate from the key vault enabled for soft-delete. Prints out the * status code from the server response when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing a Void Response}