comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
fixed.
Mono<DecryptResult> decryptAsync(EncryptionAlgorithm algorithm, byte[] cipherText, byte[] iv, byte[] authenticationData, byte[] authenticationTag, Context context, JsonWebKey key) { throw new UnsupportedOperationException("Decrypt operaiton is not supported for EC key"); }
throw new UnsupportedOperationException("Decrypt operaiton is not supported for EC key");
Mono<DecryptResult> decryptAsync(EncryptionAlgorithm algorithm, byte[] cipherText, byte[] iv, byte[] authenticationData, byte[] authenticationTag, Context context, JsonWebKey key) { throw new UnsupportedOperationException("Decrypt operation is not supported for EC key"); }
class EcKeyCryptographyClient { private KeyPair keyPair; private CryptographyServiceClient serviceClient; private Provider provider; /** * Creates a EcKeyCryptographyClient that uses {@code service} to service requests * * @param serviceClient the client to use for service side cryptography operations. */ EcKeyCryptographyClient( CryptographyServiceClient serviceClient) { this.serviceClient = serviceClient; } EcKeyCryptographyClient(JsonWebKey key, CryptographyServiceClient serviceClient) { this.provider = Security.getProvider("SunEC"); this.keyPair = key.toEC(key.hasPrivateKey(), provider); this.serviceClient = serviceClient; } private KeyPair getKeyPair(JsonWebKey key) { if(keyPair == null){ keyPair = key.toEC(key.hasPrivateKey()); } return keyPair; } Mono<EncryptResult> encryptAsync(EncryptionAlgorithm algorithm, byte[] plaintext, byte[] iv, byte[] authenticationData, Context context, JsonWebKey key) { throw new UnsupportedOperationException("Encrypt operation is not supported for EC key"); } Mono<SignResult> signAsync(SignatureAlgorithm algorithm, byte[] digest, Context context, JsonWebKey key) { keyPair = getKeyPair(key); Algorithm baseAlgorithm = AlgorithmResolver.Default.get(algorithm.toString()); if (baseAlgorithm == null) { if(serviceCryptoAvailable()) { return serviceClient.sign(algorithm, digest, context); } return Mono.error(new NoSuchAlgorithmException(algorithm.toString())); } else if (!(baseAlgorithm instanceof AsymmetricSignatureAlgorithm)) { return Mono.error(new NoSuchAlgorithmException(algorithm.toString())); } if (keyPair.getPrivate() == null){ if(serviceCryptoAvailable()) { return serviceClient.sign(algorithm, digest, context); } return Mono.error(new IllegalArgumentException("Private portion of the key not available to perform sign operation")); } Ecdsa algo = (Ecdsa) baseAlgorithm; ISignatureTransform signer = algo.createSignatureTransform(keyPair, provider); try { return Mono.just(new SignResult(signer.sign(digest), algorithm)); } catch 
(Exception e) { return Mono.error(e); } } Mono<VerifyResult> verifyAsync(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context, JsonWebKey key) { keyPair = getKeyPair(key); Algorithm baseAlgorithm = AlgorithmResolver.Default.get(algorithm.toString()); if (baseAlgorithm == null) { if(serviceCryptoAvailable()) { return serviceClient.verify(algorithm, digest, signature, context); } return Mono.error(new NoSuchAlgorithmException(algorithm.toString())); } else if (!(baseAlgorithm instanceof AsymmetricSignatureAlgorithm)) { return Mono.error(new NoSuchAlgorithmException(algorithm.toString())); } if (keyPair.getPublic() == null){ if(serviceCryptoAvailable()) { return serviceClient.verify(algorithm, digest, signature, context); } return Mono.error(new IllegalArgumentException("Public portion of the key not available to perform verify operation")); } Ecdsa algo = (Ecdsa) baseAlgorithm; ISignatureTransform signer = algo.createSignatureTransform(keyPair, provider); try { return Mono.just(new VerifyResult(signer.verify(digest, signature))); } catch (Exception e) { return Mono.error(e); } } Mono<KeyWrapResult> wrapKeyAsync(KeyWrapAlgorithm algorithm, byte[] key, Context context, JsonWebKey webKey) { return Mono.error(new UnsupportedOperationException("Wrap key operation is not supported for EC key")); } Mono<KeyUnwrapResult> unwrapKeyAsync(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context, JsonWebKey key) { throw new UnsupportedOperationException("Unwrap key operation is not supported for Ec key"); } Mono<SignResult> signDataAsync(SignatureAlgorithm algorithm, byte[] data, Context context, JsonWebKey key) { try { HashAlgorithm hashAlgorithm = SignatureHashResolver.Default.get(algorithm); MessageDigest md = MessageDigest.getInstance(hashAlgorithm.toString()); md.update(data); byte[] digest = md.digest(); return signAsync(algorithm, digest, context, key); } catch (NoSuchAlgorithmException e){ return Mono.error(e); } } Mono<VerifyResult> 
verifyDataAsync(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context, JsonWebKey key) { try { HashAlgorithm hashAlgorithm = SignatureHashResolver.Default.get(algorithm); MessageDigest md = MessageDigest.getInstance(hashAlgorithm.toString()); md.update(data); byte[] digest = md.digest(); return verifyAsync(algorithm, digest, signature, context, key); } catch (NoSuchAlgorithmException e) { return Mono.error(e); } } private boolean serviceCryptoAvailable(){ return serviceClient != null ; } }
class EcKeyCryptographyClient extends LocalKeyCryptographyClient { private KeyPair keyPair; private CryptographyServiceClient serviceClient; private Provider provider; /** * Creates a EcKeyCryptographyClient that uses {@code service} to service requests * * @param serviceClient the client to use for service side cryptography operations. */ EcKeyCryptographyClient(CryptographyServiceClient serviceClient) { super(serviceClient); this.serviceClient = serviceClient; } EcKeyCryptographyClient(JsonWebKey key, CryptographyServiceClient serviceClient) { super(serviceClient); this.provider = Security.getProvider("SunEC"); this.keyPair = key.toEC(key.hasPrivateKey(), provider); this.serviceClient = serviceClient; } private KeyPair getKeyPair(JsonWebKey key) { if (keyPair == null) { keyPair = key.toEC(key.hasPrivateKey()); } return keyPair; } @Override Mono<EncryptResult> encryptAsync(EncryptionAlgorithm algorithm, byte[] plaintext, byte[] iv, byte[] authenticationData, Context context, JsonWebKey key) { throw new UnsupportedOperationException("Encrypt operation is not supported for EC key"); } @Override @Override Mono<SignResult> signAsync(SignatureAlgorithm algorithm, byte[] digest, Context context, JsonWebKey key) { keyPair = getKeyPair(key); Algorithm baseAlgorithm = AlgorithmResolver.Default.get(algorithm.toString()); if (baseAlgorithm == null) { if (serviceCryptoAvailable()) { return serviceClient.sign(algorithm, digest, context); } return Mono.error(new NoSuchAlgorithmException(algorithm.toString())); } else if (!(baseAlgorithm instanceof AsymmetricSignatureAlgorithm)) { return Mono.error(new NoSuchAlgorithmException(algorithm.toString())); } if (keyPair.getPrivate() == null) { if (serviceCryptoAvailable()) { return serviceClient.sign(algorithm, digest, context); } return Mono.error(new IllegalArgumentException("Private portion of the key not available to perform sign operation")); } Ecdsa algo; if (baseAlgorithm instanceof Ecdsa) { algo = (Ecdsa) baseAlgorithm; } else 
{ return Mono.error(new NoSuchAlgorithmException(algorithm.toString())); } ISignatureTransform signer = algo.createSignatureTransform(keyPair, provider); try { return Mono.just(new SignResult(signer.sign(digest), algorithm)); } catch (Exception e) { return Mono.error(e); } } @Override Mono<VerifyResult> verifyAsync(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context, JsonWebKey key) { keyPair = getKeyPair(key); Algorithm baseAlgorithm = AlgorithmResolver.Default.get(algorithm.toString()); if (baseAlgorithm == null) { if (serviceCryptoAvailable()) { return serviceClient.verify(algorithm, digest, signature, context); } return Mono.error(new NoSuchAlgorithmException(algorithm.toString())); } else if (!(baseAlgorithm instanceof AsymmetricSignatureAlgorithm)) { return Mono.error(new NoSuchAlgorithmException(algorithm.toString())); } if (keyPair.getPublic() == null) { if (serviceCryptoAvailable()) { return serviceClient.verify(algorithm, digest, signature, context); } return Mono.error(new IllegalArgumentException("Public portion of the key not available to perform verify operation")); } Ecdsa algo; if (baseAlgorithm instanceof Ecdsa) { algo = (Ecdsa) baseAlgorithm; } else { return Mono.error(new NoSuchAlgorithmException(algorithm.toString())); } ISignatureTransform signer = algo.createSignatureTransform(keyPair, provider); try { return Mono.just(new VerifyResult(signer.verify(digest, signature))); } catch (Exception e) { return Mono.error(e); } } @Override Mono<KeyWrapResult> wrapKeyAsync(KeyWrapAlgorithm algorithm, byte[] key, Context context, JsonWebKey webKey) { return Mono.error(new UnsupportedOperationException("Wrap key operation is not supported for EC key")); } @Override Mono<KeyUnwrapResult> unwrapKeyAsync(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context, JsonWebKey key) { throw new UnsupportedOperationException("Unwrap key operation is not supported for Ec key"); } @Override Mono<SignResult> 
signDataAsync(SignatureAlgorithm algorithm, byte[] data, Context context, JsonWebKey key) { try { HashAlgorithm hashAlgorithm = SignatureHashResolver.DEFAULT.get(algorithm); MessageDigest md = MessageDigest.getInstance(hashAlgorithm.toString()); md.update(data); byte[] digest = md.digest(); return signAsync(algorithm, digest, context, key); } catch (NoSuchAlgorithmException e) { return Mono.error(e); } } @Override Mono<VerifyResult> verifyDataAsync(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context, JsonWebKey key) { try { HashAlgorithm hashAlgorithm = SignatureHashResolver.DEFAULT.get(algorithm); MessageDigest md = MessageDigest.getInstance(hashAlgorithm.toString()); md.update(data); byte[] digest = md.digest(); return verifyAsync(algorithm, digest, signature, context, key); } catch (NoSuchAlgorithmException e) { return Mono.error(e); } } private boolean serviceCryptoAvailable() { return serviceClient != null; } }
updated.
public KeyWrapResult wrapKey(KeyWrapAlgorithm algorithm, byte[] key) { return client.wrapKey(algorithm, key, Context.NONE).block(); }
return client.wrapKey(algorithm, key, Context.NONE).block();
public KeyWrapResult wrapKey(KeyWrapAlgorithm algorithm, byte[] key) { return wrapKey(algorithm, key, Context.NONE); }
class CryptographyClient { private CryptographyAsyncClient client; /** * Creates a KeyClient that uses {@code pipeline} to service requests * * @param client The {@link CryptographyAsyncClient} that the client routes its request through. */ CryptographyClient(CryptographyAsyncClient client) { this.client = client; } /** * Gets the public part of the configured key. The get key operation is applicable to all key types and it requires the {@code keys/get} permission. * * @throws ResourceNotFoundException when the configured key doesn't exist in the key vault. * @return The requested {@link Key key}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Key getKey() { return getKeyWithResponse(Context.NONE).value(); } /** * Gets the public part of the configured key. The get key operation is applicable to all key types and it requires the {@code keys/get} permission. * * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException when the configured key doesn't exist in the key vault. * @return A {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Key> getKeyWithResponse(Context context) { return client.getKeyWithResponse(context).block(); } /** * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports a * single block of data, the size of which is dependent on the target key and the encryption algorithm to be used. The encrypt * operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys public portion of the key is used * for encryption. This operation requires the keys/encrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. 
Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * @param algorithm The algorithm to be used for encryption. * @param plaintext The content to be encrypted. * @param iv The initialization vector * @param authenticationData The authentication data * @throws ResourceNotFoundException if the key cannot be found for encryption. * @return A {@link EncryptResult} whose {@link EncryptResult */ public EncryptResult encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, byte[] iv, byte[] authenticationData) { return encrypt(algorithm, plaintext, iv, authenticationData, Context.NONE); } /** * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports a * single block of data, the size of which is dependent on the target key and the encryption algorithm to be used. The encrypt * operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys public portion of the key is used * for encryption. This operation requires the keys/encrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * @param algorithm The algorithm to be used for encryption. * @param plaintext The content to be encrypted. * @param iv The initialization vector * @param authenticationData The authentication data * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException if the key cannot be found for encryption. 
* @return A {@link EncryptResult} whose {@link EncryptResult */ public EncryptResult encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, byte[] iv, byte[] authenticationData, Context context) { return client.encrypt(algorithm, plaintext, context, iv, authenticationData).block(); } /** * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports a * single block of data, the size of which is dependent on the target key and the encryption algorithm to be used. The encrypt * operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys public portion of the key is used * for encryption. This operation requires the keys/encrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * @param algorithm The algorithm to be used for encryption. * @param plaintext The content to be encrypted. * @throws ResourceNotFoundException if the key cannot be found for encryption. * @return The {@link EncryptResult} whose {@link EncryptResult */ public EncryptResult encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) { return encrypt(algorithm, plaintext, null, null, Context.NONE); } /** * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a single block of data may be * decrypted, the size of this block is dependent on the target key and the algorithm to be used. The decrypt operation * is supported for both asymmetric and symmetric keys. This operation requires the keys/decrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. 
Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * @param algorithm The algorithm to be used for decryption. * @param cipherText The content to be decrypted. * @param iv The initialization vector. * @param authenticationData The authentication data. * @param authenticationTag The authentication tag. * @throws ResourceNotFoundException if the key cannot be found for decryption. * @return The decrypted blob. */ public DecryptResult decrypt(EncryptionAlgorithm algorithm, byte[] cipherText, byte[] iv, byte[] authenticationData, byte[] authenticationTag) { return decrypt(algorithm, cipherText, iv, authenticationData, authenticationTag, Context.NONE); } /** * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a single block of data may be * decrypted, the size of this block is dependent on the target key and the algorithm to be used. The decrypt operation * is supported for both asymmetric and symmetric keys. This operation requires the keys/decrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * @param algorithm The algorithm to be used for decryption. * @param cipherText The content to be decrypted. * @param iv The initialization vector. * @param authenticationData The authentication data. * @param authenticationTag The authentication tag. * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException if the key cannot be found for decryption. * @return The decrypted blob. 
*/ public DecryptResult decrypt(EncryptionAlgorithm algorithm, byte[] cipherText, byte[] iv, byte[] authenticationData, byte[] authenticationTag, Context context) { return client.decrypt(algorithm, cipherText, iv, authenticationData, authenticationTag, context).block(); } /** * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a single block of data may be * decrypted, the size of this block is dependent on the target key and the algorithm to be used. The decrypt operation * is supported for both asymmetric and symmetric keys. This operation requires the keys/decrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * @param algorithm The algorithm to be used for decryption. * @param cipherText The content to be decrypted. * @throws ResourceNotFoundException if the key cannot be found for decryption. * @return The decrypted blob. */ public DecryptResult decrypt(EncryptionAlgorithm algorithm, byte[] cipherText) { return decrypt(algorithm, cipherText, null, null, null, Context.NONE); } /** * Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and * symmetric keys. This operation requires the keys/sign permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the signature from the digest. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * @param algorithm The algorithm to use for signing. * @param digest The content from which signature is to be created. 
* @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException if the key cannot be found for signing. * @return A {@link SignResult} whose {@link SignResult */ public SignResult sign(SignatureAlgorithm algorithm, byte[] digest, Context context) { return client.sign(algorithm, digest, context).block(); } /** * Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and * symmetric keys. This operation requires the keys/sign permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the signature from the digest. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * @param algorithm The algorithm to use for signing. * @param digest The content from which signature is to be created. * @throws ResourceNotFoundException if the key cannot be found for signing. * @return A {@link SignResult} whose {@link SignResult */ public SignResult sign(SignatureAlgorithm algorithm, byte[] digest) { return client.sign(algorithm, digest, Context.NONE).block(); } /** * Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric keys. * In case of asymmetric keys public portion of the key is used to verify the signature . This operation requires the keys/verify permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the signature. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * @param algorithm The algorithm to use for signing. * @param digest The content from which signature is to be created. * @param signature The signature to be verified. * @throws ResourceNotFoundException if the key cannot be found for verifying. 
* @return The {@link Boolean} indicating the signature verification result. */ public VerifyResult verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) { return verify(algorithm, digest, signature, Context.NONE); } /** * Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric keys. * In case of asymmetric keys public portion of the key is used to verify the signature . This operation requires the keys/verify permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the signature. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * @param algorithm The algorithm to use for signing. * @param digest The content from which signature is to be created. * @param signature The signature to be verified. * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException if the key cannot be found for verifying. * @return The {@link Boolean} indicating the signature verification result. */ public VerifyResult verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) { return client.verify(algorithm, digest, signature, context).block(); } /** * Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both * symmetric and asymmetric keys. This operation requires the keys/wrapKey permission. * * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified key content. Possible values include: * {@link KeyWrapAlgorithm * * @param algorithm The encryption algorithm to use for wrapping the key. * @param key The key content to be wrapped * @throws ResourceNotFoundException if the key cannot be found for wrap operation. 
* @return The {@link KeyWrapResult} whose {@link KeyWrapResult */ /** * Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both * symmetric and asymmetric keys. This operation requires the keys/wrapKey permission. * * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified key content. Possible values include: * {@link KeyWrapAlgorithm * * @param algorithm The encryption algorithm to use for wrapping the key. * @param key The key content to be wrapped * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException if the key cannot be found for wrap operation. * @return The {@link KeyWrapResult} whose {@link KeyWrapResult */ public KeyWrapResult wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) { return client.wrapKey(algorithm, key, context).block(); } /** * Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation is the reverse of the wrap operation. * The unwrap operation supports asymmetric and symmetric keys to unwrap. This operation requires the keys/unwrapKey permission. * * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified key content. Possible values for asymmetric keys include: * {@link KeyWrapAlgorithm * Possible values for symmetric keys include: {@link KeyWrapAlgorithm * * @param algorithm The encryption algorithm to use for wrapping the key. * @param encryptedKey The encrypted key content to unwrap. * @throws ResourceNotFoundException if the key cannot be found for wrap operation. * @return The unwrapped key content. 
*/ public KeyUnwrapResult unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) { return unwrapKey(algorithm, encryptedKey, Context.NONE); } /** * Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation is the reverse of the wrap operation. * The unwrap operation supports asymmetric and symmetric keys to unwrap. This operation requires the keys/unwrapKey permission. * * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified key content. Possible values for asymmetric keys include: * {@link KeyWrapAlgorithm * Possible values for symmetric keys include: {@link KeyWrapAlgorithm * * @param algorithm The encryption algorithm to use for wrapping the key. * @param encryptedKey The encrypted key content to unwrap. * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException if the key cannot be found for wrap operation. * @return The unwrapped key content. */ public KeyUnwrapResult unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) { return client.unwrapKey(algorithm, encryptedKey, context).block(); } /** * Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric and * symmetric keys. This operation requires the keys/sign permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the signature from the digest. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * @param algorithm The algorithm to use for signing. * @param data The content from which signature is to be created. * @throws ResourceNotFoundException if the key cannot be found for signing. 
* @return A {@link SignResult} whose {@link SignResult */ public SignResult signData(SignatureAlgorithm algorithm, byte[] data) { return signData(algorithm, data, Context.NONE); } /** * Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric and * symmetric keys. This operation requires the keys/sign permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the signature from the digest. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * @param algorithm The algorithm to use for signing. * @param data The content from which signature is to be created. * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException if the key cannot be found for signing. * @return A {@link SignResult} whose {@link SignResult */ public SignResult signData(SignatureAlgorithm algorithm, byte[] data, Context context) { return client.signData(algorithm, data, context).block(); } /** * Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric keys and asymmetric keys. * In case of asymmetric keys public portion of the key is used to verify the signature . This operation requires the keys/verify permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the signature. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * @param algorithm The algorithm to use for signing. * @param data The raw content against which signature is to be verified. * @param signature The signature to be verified. * @throws ResourceNotFoundException if the key cannot be found for verifying. 
* @return The {@link Boolean} indicating the signature verification result. */ public VerifyResult verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) { return verifyData(algorithm, data, signature, Context.NONE); } /** * Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric keys and asymmetric keys. * In case of asymmetric keys public portion of the key is used to verify the signature . This operation requires the keys/verify permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the signature. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * @param algorithm The algorithm to use for signing. * @param data The raw content against which signature is to be verified. * @param signature The signature to be verified. * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException if the key cannot be found for verifying. * @return The {@link Boolean} indicating the signature verification result. */ public VerifyResult verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) { return client.verifyData(algorithm, data, signature, context).block(); } CryptographyServiceClient getServiceClient() { return client.getCryptographyServiceClient(); } }
class CryptographyClient { private final CryptographyAsyncClient client; /** * Creates a KeyClient that uses {@code pipeline} to service requests * * @param client The {@link CryptographyAsyncClient} that the client routes its request through. */ CryptographyClient(CryptographyAsyncClient client) { this.client = client; } /** * Gets the public part of the configured key. The get key operation is applicable to all key types and it requires the {@code keys/get} permission. * * <p><strong>Code Samples</strong></p> * <p>Gets the key configured in the client. Prints out the returned key details.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.getKey} * * @throws ResourceNotFoundException when the configured key doesn't exist in the key vault. * @return The requested {@link Key key}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Key getKey() { return getKeyWithResponse(Context.NONE).value(); } /** * Gets the public part of the configured key. The get key operation is applicable to all key types and it requires the {@code keys/get} permission. * * <p><strong>Code Samples</strong></p> * <p>Gets the key configured in the client. Prints out the returned key details.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.getKeyWithResponse * * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException when the configured key doesn't exist in the key vault. * @return A {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Key> getKeyWithResponse(Context context) { return client.getKeyWithResponse(context).block(); } /** * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports a * single block of data, the size of which is dependent on the target key and the encryption algorithm to be used. 
The encrypt * operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys public portion of the key is used * for encryption. This operation requires the keys/encrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Encrypts the content. Prints out the encrypted content details.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.encrypt * * @param algorithm The algorithm to be used for encryption. * @param plaintext The content to be encrypted. * @param iv The initialization vector * @param authenticationData The authentication data * @throws ResourceNotFoundException if the key cannot be found for encryption. * @throws NullPointerException if {@code algorithm} or {@code plainText} is null. * @return A {@link EncryptResult} whose {@link EncryptResult */ public EncryptResult encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, byte[] iv, byte[] authenticationData) { return encrypt(algorithm, plaintext, iv, authenticationData, Context.NONE); } /** * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports a * single block of data, the size of which is dependent on the target key and the encryption algorithm to be used. The encrypt * operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys public portion of the key is used * for encryption. This operation requires the keys/encrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. 
Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.encrypt * * @param algorithm The algorithm to be used for encryption. * @param plaintext The content to be encrypted. * @param iv The initialization vector * @param authenticationData The authentication data * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException if the key cannot be found for encryption. * @throws NullPointerException if {@code algorithm} or {@code plainText} is null. * @return A {@link EncryptResult} whose {@link EncryptResult */ public EncryptResult encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, byte[] iv, byte[] authenticationData, Context context) { return client.encrypt(algorithm, plaintext, context, iv, authenticationData).block(); } /** * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports a * single block of data, the size of which is dependent on the target key and the encryption algorithm to be used. The encrypt * operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys public portion of the key is used * for encryption. This operation requires the keys/encrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. 
Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.encrypt * * @param algorithm The algorithm to be used for encryption. * @param plaintext The content to be encrypted. * @throws ResourceNotFoundException if the key cannot be found for encryption. * @throws NullPointerException if {@code algorithm} or {@code plainText} is null. * @return The {@link EncryptResult} whose {@link EncryptResult */ public EncryptResult encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) { return encrypt(algorithm, plaintext, null, null, Context.NONE); } /** * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a single block of data may be * decrypted, the size of this block is dependent on the target key and the algorithm to be used. The decrypt operation * is supported for both asymmetric and symmetric keys. This operation requires the keys/decrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.decrypt * * @param algorithm The algorithm to be used for decryption. 
* @param cipherText The content to be decrypted. * @param iv The initialization vector. * @param authenticationData The authentication data. * @param authenticationTag The authentication tag. * @throws ResourceNotFoundException if the key cannot be found for decryption. * @throws NullPointerException if {@code algorithm} or {@code cipherText} is null. * @return The decrypted blob. */ public DecryptResult decrypt(EncryptionAlgorithm algorithm, byte[] cipherText, byte[] iv, byte[] authenticationData, byte[] authenticationTag) { return decrypt(algorithm, cipherText, iv, authenticationData, authenticationTag, Context.NONE); } /** * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a single block of data may be * decrypted, the size of this block is dependent on the target key and the algorithm to be used. The decrypt operation * is supported for both asymmetric and symmetric keys. This operation requires the keys/decrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.decrypt * * @param algorithm The algorithm to be used for decryption. * @param cipherText The content to be decrypted. * @param iv The initialization vector. * @param authenticationData The authentication data. * @param authenticationTag The authentication tag. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @throws ResourceNotFoundException if the key cannot be found for decryption. * @throws NullPointerException if {@code algorithm} or {@code cipherText} is null. * @return The decrypted blob. */ public DecryptResult decrypt(EncryptionAlgorithm algorithm, byte[] cipherText, byte[] iv, byte[] authenticationData, byte[] authenticationTag, Context context) { return client.decrypt(algorithm, cipherText, iv, authenticationData, authenticationTag, context).block(); } /** * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a single block of data may be * decrypted, the size of this block is dependent on the target key and the algorithm to be used. The decrypt operation * is supported for both asymmetric and symmetric keys. This operation requires the keys/decrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.decrypt * * @param algorithm The algorithm to be used for decryption. * @param cipherText The content to be decrypted. * @throws ResourceNotFoundException if the key cannot be found for decryption. * @throws NullPointerException if {@code algorithm} or {@code cipherText} is null. * @return The decrypted blob. */ public DecryptResult decrypt(EncryptionAlgorithm algorithm, byte[] cipherText) { return decrypt(algorithm, cipherText, null, null, null, Context.NONE); } /** * Creates a signature from a digest using the configured key. 
The sign operation supports both asymmetric and * symmetric keys. This operation requires the keys/sign permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the signature from the digest. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.sign-Context} * * @param algorithm The algorithm to use for signing. * @param digest The content from which signature is to be created. * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException if the key cannot be found for signing. * @throws NullPointerException if {@code algorithm} or {@code digest} is null. * @return A {@link SignResult} whose {@link SignResult */ public SignResult sign(SignatureAlgorithm algorithm, byte[] digest, Context context) { return client.sign(algorithm, digest, context).block(); } /** * Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and * symmetric keys. This operation requires the keys/sign permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the signature from the digest. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Sings the digest. 
Subscribes to the call asynchronously and prints out the signature details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.sign} * * @param algorithm The algorithm to use for signing. * @param digest The content from which signature is to be created. * @throws ResourceNotFoundException if the key cannot be found for signing. * @throws NullPointerException if {@code algorithm} or {@code digest} is null. * @return A {@link SignResult} whose {@link SignResult */ public SignResult sign(SignatureAlgorithm algorithm, byte[] digest) { return client.sign(algorithm, digest, Context.NONE).block(); } /** * Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric keys. * In case of asymmetric keys public portion of the key is used to verify the signature . This operation requires the keys/verify permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the signature. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the verification details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.verify} * * @param algorithm The algorithm to use for signing. * @param digest The content from which signature was created. * @param signature The signature to be verified. * @throws ResourceNotFoundException if the key cannot be found for verifying. * @throws NullPointerException if {@code algorithm}, {@code digest} or {@code signature} is null. * @return The {@link Boolean} indicating the signature verification result. 
*/ public VerifyResult verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) { return verify(algorithm, digest, signature, Context.NONE); } /** * Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric keys. * In case of asymmetric keys public portion of the key is used to verify the signature . This operation requires the keys/verify permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the signature. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the verification details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.verify-Context} * * @param algorithm The algorithm to use for signing. * @param digest The content from which signature is to be created. * @param signature The signature to be verified. * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException if the key cannot be found for verifying. * @throws NullPointerException if {@code algorithm}, {@code digest} or {@code signature} is null. * @return The {@link Boolean} indicating the signature verification result. */ public VerifyResult verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) { return client.verify(algorithm, digest, signature, context).block(); } /** * Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both * symmetric and asymmetric keys. This operation requires the keys/wrapKey permission. 
* * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified key content. Possible values include: * {@link KeyWrapAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.wrap-key} * * @param algorithm The encryption algorithm to use for wrapping the key. * @param key The key content to be wrapped * @throws ResourceNotFoundException if the key cannot be found for wrap operation. * @throws NullPointerException if {@code algorithm} or {@code key} is null. * @return The {@link KeyWrapResult} whose {@link KeyWrapResult */ /** * Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both * symmetric and asymmetric keys. This operation requires the keys/wrapKey permission. * * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified key content. Possible values include: * {@link KeyWrapAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.wrap-key-Context} * * @param algorithm The encryption algorithm to use for wrapping the key. * @param key The key content to be wrapped * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException if the key cannot be found for wrap operation. * @throws NullPointerException if {@code algorithm} or {@code key} is null. 
* @return The {@link KeyWrapResult} whose {@link KeyWrapResult */ public KeyWrapResult wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) { return client.wrapKey(algorithm, key, context).block(); } /** * Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation is the reverse of the wrap operation. * The unwrap operation supports asymmetric and symmetric keys to unwrap. This operation requires the keys/unwrapKey permission. * * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified key content. Possible values for asymmetric keys include: * {@link KeyWrapAlgorithm * Possible values for symmetric keys include: {@link KeyWrapAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.unwrap-key} * * @param algorithm The encryption algorithm to use for wrapping the key. * @param encryptedKey The encrypted key content to unwrap. * @throws ResourceNotFoundException if the key cannot be found for wrap operation. * @throws NullPointerException if {@code algorithm} or {@code encryptedKey} is null. * @return The unwrapped key content. */ public KeyUnwrapResult unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) { return unwrapKey(algorithm, encryptedKey, Context.NONE); } /** * Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation is the reverse of the wrap operation. * The unwrap operation supports asymmetric and symmetric keys to unwrap. This operation requires the keys/unwrapKey permission. * * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified key content. 
Possible values for asymmetric keys include: * {@link KeyWrapAlgorithm * Possible values for symmetric keys include: {@link KeyWrapAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.unwrap-key-Context} * * @param algorithm The encryption algorithm to use for wrapping the key. * @param encryptedKey The encrypted key content to unwrap. * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException if the key cannot be found for wrap operation. * @throws NullPointerException if {@code algorithm} or {@code encryptedKey} is null. * @return The unwrapped key content. */ public KeyUnwrapResult unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) { return client.unwrapKey(algorithm, encryptedKey, context).block(); } /** * Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric and * symmetric keys. This operation requires the keys/sign permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the signature from the digest. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.sign-data} * * @param algorithm The algorithm to use for signing. * @param data The content from which signature is to be created. * @throws ResourceNotFoundException if the key cannot be found for signing. 
* @throws NullPointerException if {@code algorithm} or {@code data} is null. * @return A {@link SignResult} whose {@link SignResult */ public SignResult signData(SignatureAlgorithm algorithm, byte[] data) { return signData(algorithm, data, Context.NONE); } /** * Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric and * symmetric keys. This operation requires the keys/sign permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the signature from the digest. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.sign-data-Context} * * @param algorithm The algorithm to use for signing. * @param data The content from which signature is to be created. * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException if the key cannot be found for signing. * @throws NullPointerException if {@code algorithm} or {@code data} is null. * @return A {@link SignResult} whose {@link SignResult */ public SignResult signData(SignatureAlgorithm algorithm, byte[] data, Context context) { return client.signData(algorithm, data, context).block(); } /** * Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric keys and asymmetric keys. * In case of asymmetric keys public portion of the key is used to verify the signature . This operation requires the keys/verify permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the signature. 
Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the verification details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.verify-data} * * @param algorithm The algorithm to use for signing. * @param data The raw content against which signature is to be verified. * @param signature The signature to be verified. * @throws ResourceNotFoundException if the key cannot be found for verifying. * @throws NullPointerException if {@code algorithm}, {@code data} or {@code signature} is null. * @return The {@link Boolean} indicating the signature verification result. */ public VerifyResult verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) { return verifyData(algorithm, data, signature, Context.NONE); } /** * Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric keys and asymmetric keys. * In case of asymmetric keys public portion of the key is used to verify the signature . This operation requires the keys/verify permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the signature. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the verification details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.verify-data-Context} * * @param algorithm The algorithm to use for signing. 
* @param data The raw content against which signature is to be verified. * @param signature The signature to be verified. * @param context Additional context that is passed through the Http pipeline during the service call. * @throws ResourceNotFoundException if the key cannot be found for verifying. * @throws NullPointerException if {@code algorithm}, {@code data} or {@code signature} is null. * @return The {@link Boolean} indicating the signature verification result. */ public VerifyResult verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) { return client.verifyData(algorithm, data, signature, context).block(); } CryptographyServiceClient getServiceClient() { return client.getCryptographyServiceClient(); } }
That works too.
private void unpackAndValidateId(String keyId) { if (keyId != null && keyId.length() > 0) { try { URL url = new URL(keyId); String[] tokens = url.getPath().split("/"); String endpoint = url.getProtocol() + ": String keyName = (tokens.length >= 3 ? tokens[2] : null); version = (tokens.length >= 4 ? tokens[3] : null); if(Strings.isNullOrEmpty(endpoint)) { throw new IllegalArgumentException("Key endpoint in key id is invalid"); } else if (Strings.isNullOrEmpty(keyName)) { throw new IllegalArgumentException("Key name in key id is invalid"); } else if(Strings.isNullOrEmpty(version)) { throw new IllegalArgumentException("Key version in key id is invalid"); } } catch (MalformedURLException e) { e.printStackTrace(); } } else { throw new IllegalArgumentException("Key Id is invalid"); } }
if (keyId != null && keyId.length() > 0) {
private void unpackAndValidateId(String keyId) { if (ImplUtils.isNullOrEmpty(keyId)) { throw new IllegalArgumentException("Key Id is invalid"); } try { URL url = new URL(keyId); String[] tokens = url.getPath().split("/"); String endpoint = url.getProtocol() + ": String keyName = (tokens.length >= 3 ? tokens[2] : null); String version = (tokens.length >= 4 ? tokens[3] : null); if (Strings.isNullOrEmpty(endpoint)) { throw new IllegalArgumentException("Key endpoint in key id is invalid"); } else if (Strings.isNullOrEmpty(keyName)) { throw new IllegalArgumentException("Key name in key id is invalid"); } else if (Strings.isNullOrEmpty(version)) { throw new IllegalArgumentException("Key version in key id is invalid"); } } catch (MalformedURLException e) { throw new IllegalArgumentException("The key identifier is malformed", e); } }
/**
 * Asynchronous client that performs cryptographic operations (encrypt/decrypt,
 * sign/verify, wrap/unwrap) with a key stored in Azure Key Vault. When the key
 * material ({@link JsonWebKey}) is available locally and permits the requested
 * operation, the operation is performed client-side; otherwise it falls back to
 * the Key Vault service via {@link CryptographyServiceClient}.
 */
class CryptographyAsyncClient {
    // Cached key material; may be fetched lazily from the service (see ensureValidKeyAvailable).
    private JsonWebKey key;
    // REST proxy for the Key Vault cryptography endpoints.
    private CryptographyService service;
    // Key version parsed from the key identifier by unpackAndValidateId.
    private String version;
    private EcKeyCryptographyClient ecKeyCryptographyClient;
    private RsaKeyCryptographyClient rsaKeyCryptographyClient;
    private CryptographyServiceClient cryptographyServiceClient;
    private SymmetricKeyCryptographyClient symmetricKeyCryptographyClient;
    private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class);

    /**
     * Creates a CryptographyAsyncClient that uses {@code pipeline} to service requests.
     *
     * @param key the JsonWebKey to use for cryptography operations.
     * @param pipeline HttpPipeline that the HTTP requests and responses flow through.
     */
    CryptographyAsyncClient(JsonWebKey key, HttpPipeline pipeline) {
        Objects.requireNonNull(key);
        this.key = key;
        service = RestProxy.create(CryptographyService.class, pipeline);
        // A service-side fallback is only possible when the key carries a Key Vault identifier.
        if (!Strings.isNullOrEmpty(key.kid())) {
            unpackAndValidateId(key.kid());
            cryptographyServiceClient = new CryptographyServiceClient(key.kid(), service);
        }
        initializeCryptoClients();
    }

    /**
     * Creates a CryptographyAsyncClient that uses {@code pipeline} to service requests.
     *
     * @param kid The Azure Key Vault key identifier to use for cryptography operations.
     * @param pipeline HttpPipeline that the HTTP requests and responses flow through.
     */
    CryptographyAsyncClient(String kid, HttpPipeline pipeline) {
        unpackAndValidateId(kid);
        service = RestProxy.create(CryptographyService.class, pipeline);
        cryptographyServiceClient = new CryptographyServiceClient(kid, service);
        // No local key material yet; all type-specific clients delegate to the service.
        ecKeyCryptographyClient = new EcKeyCryptographyClient(cryptographyServiceClient);
        rsaKeyCryptographyClient = new RsaKeyCryptographyClient(cryptographyServiceClient);
        symmetricKeyCryptographyClient = new SymmetricKeyCryptographyClient(cryptographyServiceClient);
    }

    // Wires the type-specific local crypto client matching the key's type.
    private void initializeCryptoClients() {
        switch (key.kty()) {
            case RSA:
            case RSA_HSM:
                rsaKeyCryptographyClient = new RsaKeyCryptographyClient(key, cryptographyServiceClient);
                break;
            case EC:
            case EC_HSM:
                ecKeyCryptographyClient = new EcKeyCryptographyClient(key, cryptographyServiceClient);
                break;
            case OCT:
                symmetricKeyCryptographyClient = new SymmetricKeyCryptographyClient(key, cryptographyServiceClient);
                break;
            default:
                throw new IllegalArgumentException(String.format("The Json Web Key Type: %s is not supported.", key.kty().toString()));
        }
    }

    /**
     * Gets the public part of the configured key. The get key operation is applicable to all
     * key types and it requires the {@code keys/get} permission.
     *
     * @throws ResourceNotFoundException when the configured key doesn't exist in the key vault.
     * @return A {@link Mono} containing a {@link Response} whose value is the requested key.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Key>> getKey() {
        return withContext(context -> getKey(context));
    }

    Mono<Response<Key>> getKey(Context context) {
        return cryptographyServiceClient.getKey(context);
    }

    /**
     * Encrypts an arbitrary sequence of bytes using the configured key. Only a single block of
     * data is supported; its size depends on the target key and the encryption algorithm.
     * For asymmetric keys the public portion of the key is used. Requires the keys/encrypt permission.
     *
     * @param algorithm The algorithm to be used for encryption.
     * @param plaintext The content to be encrypted.
     * @throws ResourceNotFoundException if the key cannot be found for encryption.
     * @return A {@link Mono} containing the {@link EncryptResult}.
     */
    public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
        return withContext(context -> encrypt(algorithm, plaintext, context, null, null));
    }

    /**
     * Encrypts an arbitrary sequence of bytes using the configured key, with an optional
     * initialization vector and authentication data (for authenticated symmetric algorithms).
     * Requires the keys/encrypt permission.
     *
     * @param algorithm The algorithm to be used for encryption.
     * @param plaintext The content to be encrypted.
     * @param iv The initialization vector.
     * @param authenticationData The authentication data.
     * @throws ResourceNotFoundException if the key cannot be found for encryption.
     * @return A {@link Mono} containing the {@link EncryptResult}.
     */
    public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, byte[] iv, byte[] authenticationData) {
        return withContext(context -> encrypt(algorithm, plaintext, context, iv, authenticationData));
    }

    Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context, byte[] iv, byte[] authenticationData) {
        Objects.requireNonNull(algorithm);
        boolean keyAvailableLocally = ensureValidKeyAvailable();
        if (!keyAvailableLocally) {
            // No usable local key material: delegate to the Key Vault service.
            return cryptographyServiceClient.encrypt(algorithm, plaintext, context);
        }
        if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.ENCRYPT)) {
            return Mono.error(new UnsupportedOperationException(String.format("Encrypt Operation is not supported for key with id %s", key.kid())));
        }
        switch (key.kty()) {
            case RSA:
            case RSA_HSM:
                return rsaKeyCryptographyClient.encryptAsync(algorithm, plaintext, iv, authenticationData, context, key);
            case EC:
            case EC_HSM:
                return ecKeyCryptographyClient.encryptAsync(algorithm, plaintext, iv, authenticationData, context, key);
            case OCT:
                return symmetricKeyCryptographyClient.encryptAsync(algorithm, plaintext, iv, authenticationData, context, key);
            default:
                // Consistent with the sibling operations: surface unsupported key types as an
                // async error instead of throwing synchronously.
                return Mono.error(new UnsupportedOperationException(String.format("Encrypt operation is not supported for Key Type: %s", key.kty().toString())));
        }
    }

    /**
     * Decrypts a single block of encrypted data using the configured key and specified algorithm.
     * Requires the keys/decrypt permission.
     *
     * @param algorithm The algorithm to be used for decryption.
     * @param cipherText The content to be decrypted.
     * @throws ResourceNotFoundException if the key cannot be found for decryption.
     * @return A {@link Mono} containing the decrypted blob.
     */
    public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] cipherText) {
        return withContext(context -> decrypt(algorithm, cipherText, null, null, null, context));
    }

    /**
     * Decrypts a single block of encrypted data using the configured key and specified algorithm,
     * with optional initialization vector, authentication data and authentication tag (for
     * authenticated symmetric algorithms). Requires the keys/decrypt permission.
     *
     * @param algorithm The algorithm to be used for decryption.
     * @param cipherText The content to be decrypted.
     * @param iv The initialization vector.
     * @param authenticationData The authentication data.
     * @param authenticationTag The authentication tag.
     * @throws ResourceNotFoundException if the key cannot be found for decryption.
     * @return A {@link Mono} containing the decrypted blob.
     */
    public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] cipherText, byte[] iv, byte[] authenticationData, byte[] authenticationTag) {
        return withContext(context -> decrypt(algorithm, cipherText, iv, authenticationData, authenticationTag, context));
    }

    Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] cipherText, byte[] iv, byte[] authenticationData, byte[] authenticationTag, Context context) {
        Objects.requireNonNull(algorithm);
        boolean keyAvailableLocally = ensureValidKeyAvailable();
        if (!keyAvailableLocally) {
            return cryptographyServiceClient.decrypt(algorithm, cipherText, context);
        }
        if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.DECRYPT)) {
            return Mono.error(new UnsupportedOperationException(String.format("Decrypt Operation is not allowed for key with id %s", key.kid())));
        }
        switch (key.kty()) {
            case RSA:
            case RSA_HSM:
                return rsaKeyCryptographyClient.decryptAsync(algorithm, cipherText, iv, authenticationData, authenticationTag, context, key);
            case EC:
            case EC_HSM:
                return ecKeyCryptographyClient.decryptAsync(algorithm, cipherText, iv, authenticationData, authenticationTag, context, key);
            case OCT:
                return symmetricKeyCryptographyClient.decryptAsync(algorithm, cipherText, iv, authenticationData, authenticationTag, context, key);
            default:
                return Mono.error(new UnsupportedOperationException(String.format("Decrypt operation is not supported for Key Type: %s", key.kty().toString())));
        }
    }

    /**
     * Creates a signature from a digest using the configured key. Supported for both asymmetric
     * and symmetric keys. Requires the keys/sign permission.
     *
     * @param algorithm The algorithm to use for signing.
     * @param digest The content from which the signature is to be created.
     * @throws ResourceNotFoundException if the key cannot be found for signing.
     * @return A {@link Mono} containing the {@link SignResult}.
     */
    public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
        return withContext(context -> sign(algorithm, digest, context));
    }

    Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
        Objects.requireNonNull(algorithm);
        boolean keyAvailableLocally = ensureValidKeyAvailable();
        if (!keyAvailableLocally) {
            return cryptographyServiceClient.sign(algorithm, digest, context);
        }
        if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.SIGN)) {
            return Mono.error(new UnsupportedOperationException(String.format("Sign Operation is not allowed for key with id %s", key.kid())));
        }
        switch (this.key.kty()) {
            case RSA:
            case RSA_HSM:
                return rsaKeyCryptographyClient.signAsync(algorithm, digest, context, key);
            case EC:
            case EC_HSM:
                return ecKeyCryptographyClient.signAsync(algorithm, digest, context, key);
            case OCT:
                return symmetricKeyCryptographyClient.signAsync(algorithm, digest, context, key);
            default:
                // Fixed typo ("operaiton") in the error message.
                return Mono.error(new UnsupportedOperationException(String.format("Sign operation is not supported for Key Type: %s", key.kty().toString())));
        }
    }

    /**
     * Verifies a signature using the configured key. For asymmetric keys the public portion of
     * the key is used to verify the signature. Requires the keys/verify permission.
     *
     * @param algorithm The algorithm to use for verification.
     * @param digest The digest against which the signature is to be verified.
     * @param signature The signature to be verified.
     * @throws ResourceNotFoundException if the key cannot be found for verifying.
     * @return A {@link Mono} containing the {@link VerifyResult}.
     */
    public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
        return withContext(context -> verify(algorithm, digest, signature, context));
    }

    Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
        boolean keyAvailableLocally = ensureValidKeyAvailable();
        if (!keyAvailableLocally) {
            return cryptographyServiceClient.verify(algorithm, digest, signature, context);
        }
        if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.VERIFY)) {
            return Mono.error(new UnsupportedOperationException(String.format("Verify Operation is not allowed for key with id %s", key.kid())));
        }
        switch (this.key.kty()) {
            case RSA:
            case RSA_HSM:
                return rsaKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key);
            case EC:
            case EC_HSM:
                return ecKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key);
            case OCT:
                return symmetricKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key);
            default:
                return Mono.error(new UnsupportedOperationException(String.format("Verify operation is not supported for Key Type: %s", key.kty().toString())));
        }
    }

    /**
     * Wraps a symmetric key using the configured key. Requires the keys/wrapKey permission.
     *
     * @param algorithm The encryption algorithm to use for wrapping the key.
     * @param key The key content to be wrapped.
     * @throws ResourceNotFoundException if the key cannot be found for the wrap operation.
     * @return A {@link Mono} containing the {@link KeyWrapResult}.
     */
    public Mono<KeyWrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
        return withContext(context -> wrapKey(algorithm, key, context));
    }

    Mono<KeyWrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
        boolean keyAvailableLocally = ensureValidKeyAvailable();
        if (!keyAvailableLocally) {
            return cryptographyServiceClient.wrapKey(algorithm, key, context);
        }
        if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.WRAP_KEY)) {
            return Mono.error(new UnsupportedOperationException(String.format("Wrap Key Operation is not allowed for key with id %s", this.key.kid())));
        }
        switch (this.key.kty()) {
            case RSA:
            case RSA_HSM:
                return rsaKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key);
            case EC:
            case EC_HSM:
                return ecKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key);
            case OCT:
                return symmetricKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key);
            default:
                // Fixed copy-paste error message (previously said "Encrypt Async").
                return Mono.error(new UnsupportedOperationException(String.format("Wrap key operation is not supported for Key Type: %s", this.key.kty().toString())));
        }
    }

    /**
     * Unwraps a symmetric key using the configured key that was initially used for wrapping it.
     * This operation is the reverse of the wrap operation. Requires the keys/unwrapKey permission.
     *
     * @param algorithm The encryption algorithm to use for unwrapping the key.
     * @param encryptedKey The encrypted key content to unwrap.
     * @throws ResourceNotFoundException if the key cannot be found for the unwrap operation.
     * @return A {@link Mono} containing the unwrapped key content.
     */
    public Mono<KeyUnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
        return withContext(context -> unwrapKey(algorithm, encryptedKey, context));
    }

    Mono<KeyUnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
        Objects.requireNonNull(algorithm);
        boolean keyAvailableLocally = ensureValidKeyAvailable();
        if (!keyAvailableLocally) {
            return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context);
        }
        // Bug fix: previously checked WRAP_KEY, letting keys without the unwrapKey
        // permission attempt local unwrap (and blocking keys that only had unwrapKey).
        if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.UNWRAP_KEY)) {
            return Mono.error(new UnsupportedOperationException(String.format("Unwrap Key Operation is not allowed for key with id %s", this.key.kid())));
        }
        switch (this.key.kty()) {
            case RSA:
            case RSA_HSM:
                return rsaKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key);
            case EC:
            case EC_HSM:
                return ecKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key);
            case OCT:
                return symmetricKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key);
            default:
                // Fixed copy-paste error message (previously said "Encrypt Async").
                return Mono.error(new UnsupportedOperationException(String.format("Unwrap key operation is not supported for Key Type: %s", key.kty().toString())));
        }
    }

    /**
     * Creates a signature from raw data using the configured key (the digest is computed
     * locally or service-side as appropriate). Requires the keys/sign permission.
     *
     * @param algorithm The algorithm to use for signing.
     * @param data The content from which the signature is to be created.
     * @throws ResourceNotFoundException if the key cannot be found for signing.
     * @return A {@link Mono} containing the {@link SignResult}.
     */
    public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
        return withContext(context -> signData(algorithm, data, context));
    }

    Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
        Objects.requireNonNull(algorithm);
        boolean keyAvailableLocally = ensureValidKeyAvailable();
        if (!keyAvailableLocally) {
            return cryptographyServiceClient.signData(algorithm, data, context);
        }
        if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.SIGN)) {
            return Mono.error(new UnsupportedOperationException(String.format("Sign Operation is not allowed for key with id %s", this.key.kid())));
        }
        switch (this.key.kty()) {
            case RSA:
            case RSA_HSM:
                return rsaKeyCryptographyClient.signDataAsync(algorithm, data, context, key);
            case EC:
            case EC_HSM:
                return ecKeyCryptographyClient.signDataAsync(algorithm, data, context, key);
            default:
                // Fixed copy-paste error message (previously said "Encrypt Async").
                return Mono.error(new UnsupportedOperationException(String.format("Sign operation is not supported for Key Type: %s", key.kty().toString())));
        }
    }

    /**
     * Verifies a signature against raw data using the configured key. For asymmetric keys the
     * public portion of the key is used. Requires the keys/verify permission.
     *
     * @param algorithm The algorithm to use for verification.
     * @param data The raw content against which the signature is to be verified.
     * @param signature The signature to be verified.
     * @throws ResourceNotFoundException if the key cannot be found for verifying.
     * @return A {@link Mono} containing the {@link VerifyResult}.
     */
    public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
        return withContext(context -> verifyData(algorithm, data, signature, context));
    }

    Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
        Objects.requireNonNull(algorithm);
        boolean keyAvailableLocally = ensureValidKeyAvailable();
        if (!keyAvailableLocally) {
            return cryptographyServiceClient.verifyData(algorithm, data, signature, context);
        }
        if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.VERIFY)) {
            return Mono.error(new UnsupportedOperationException(String.format("Verify Operation is not allowed for key with id %s", this.key.kid())));
        }
        switch (this.key.kty()) {
            case RSA:
            case RSA_HSM:
                return rsaKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key);
            case EC:
            case EC_HSM:
                return ecKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key);
            default:
                // Fixed copy-paste error message (previously said "Encrypt Async").
                return Mono.error(new UnsupportedOperationException(String.format("Verify operation is not supported for Key Type: %s", key.kty().toString())));
        }
    }

    // Returns true when the key's allowed operations include the requested one.
    private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) {
        return operations.contains(keyOperation);
    }

    /**
     * Ensures local key material is available and valid, lazily fetching it from the service
     * when this client was constructed from a key identifier only.
     *
     * @return true if valid key material is available locally; false if the service must be used.
     */
    private boolean ensureValidKeyAvailable() {
        boolean keyAvailableLocally = true;
        if (key == null) {
            try {
                // Blocking here is acceptable: this path only runs once, on first use,
                // for clients constructed without key material.
                this.key = getKey().block().value().keyMaterial();
                keyAvailableLocally = this.key.isValid();
            } catch (HttpResponseException e) {
                // Deliberate best-effort: fall back to service-side cryptography.
                logger.info("Failed to retrieve key from key vault");
                keyAvailableLocally = false;
            }
        }
        return keyAvailableLocally;
    }
}
class CryptographyAsyncClient { static final String KEY_VAULT_SCOPE = "https: private JsonWebKey key; private final CryptographyService service; private final CryptographyServiceClient cryptographyServiceClient; private LocalKeyCryptographyClient localKeyCryptographyClient; private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class); /** * Creates a CryptographyAsyncClient that uses {@code pipeline} to service requests * * @param key the JsonWebKey to use for cryptography operations. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. */ CryptographyAsyncClient(JsonWebKey key, HttpPipeline pipeline) { Objects.requireNonNull(key); if (!key.isValid()) { throw new IllegalArgumentException("Json Web Key is not valid"); } if (key.keyOps() == null) { throw new IllegalArgumentException("Json Web Key's key operations property is not configured"); } if (key.kty() == null) { throw new IllegalArgumentException("Json Web Key's key type property is not configured"); } this.key = key; service = RestProxy.create(CryptographyService.class, pipeline); if (!Strings.isNullOrEmpty(key.kid())) { unpackAndValidateId(key.kid()); cryptographyServiceClient = new CryptographyServiceClient(key.kid(), service); } else { cryptographyServiceClient = null; } initializeCryptoClients(); } /** * Creates a CryptographyAsyncClient that uses {@code pipeline} to service requests * * @param kid THe Azure Key vault key identifier to use for cryptography operations. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. 
*/ CryptographyAsyncClient(String kid, HttpPipeline pipeline) { unpackAndValidateId(kid); service = RestProxy.create(CryptographyService.class, pipeline); cryptographyServiceClient = new CryptographyServiceClient(kid, service); this.key = null; } private void initializeCryptoClients() { if (localKeyCryptographyClient != null) { return; } switch (key.kty()) { case RSA: case RSA_HSM: localKeyCryptographyClient = new RsaKeyCryptographyClient(key, cryptographyServiceClient); break; case EC: case EC_HSM: localKeyCryptographyClient = new EcKeyCryptographyClient(key, cryptographyServiceClient); break; case OCT: localKeyCryptographyClient = new SymmetricKeyCryptographyClient(key, cryptographyServiceClient); break; default: throw new IllegalArgumentException(String.format("The Json Web Key Type: %s is not supported.", key.kty().toString())); } } /** * Gets the public part of the configured key. The get key operation is applicable to all key types and it requires the {@code keys/get} permission. * * <p><strong>Code Samples</strong></p> * <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.getKeyWithResponse} * * @throws ResourceNotFoundException when the configured key doesn't exist in the key vault. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Key>> getKeyWithResponse() { return withContext(context -> getKeyWithResponse(context)); } /** * Gets the public part of the configured key. The get key operation is applicable to all key types and it requires the {@code keys/get} permission. * * <p><strong>Code Samples</strong></p> * <p>Gets the configured key in the client. 
Subscribes to the call asynchronously and prints out the returned key details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.getKey} * * @throws ResourceNotFoundException when the configured key doesn't exist in the key vault. * @return A {@link Mono} containing the requested {@link Key key}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Key> getKey() { return getKeyWithResponse().flatMap(FluxUtil::toMono); } Mono<Response<Key>> getKeyWithResponse(Context context) { return cryptographyServiceClient.getKey(context); } /** * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports a * single block of data, the size of which is dependent on the target key and the encryption algorithm to be used. The encrypt * operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys public portion of the key is used * for encryption. This operation requires the keys/encrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the specified {@code plaintext}. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.encrypt * * @param algorithm The algorithm to be used for encryption. * @param plaintext The content to be encrypted. * @throws ResourceNotFoundException if the key cannot be found for encryption. * @throws NullPointerException if {@code algorithm} or {@code plainText} is null. 
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult */ public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) { return withContext(context -> encrypt(algorithm, plaintext, context, null, null)); } /** * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports a * single block of data, the size of which is dependent on the target key and the encryption algorithm to be used. The encrypt * operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys public portion of the key is used * for encryption. This operation requires the keys/encrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the specified {@code plaintext}. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.encrypt * * @param algorithm The algorithm to be used for encryption. * @param plaintext The content to be encrypted. * @param iv The initialization vector * @param authenticationData The authentication data * @throws ResourceNotFoundException if the key cannot be found for encryption. * @throws NullPointerException if {@code algorithm} or {@code plainText} is null. 
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult */ public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, byte[] iv, byte[] authenticationData) { return withContext(context -> encrypt(algorithm, plaintext, context, iv, authenticationData)); } Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context, byte[] iv, byte[] authenticationData) { Objects.requireNonNull(algorithm, "Encryption algorithm cannot be null."); Objects.requireNonNull(plaintext, "Plain text content to be encrypted cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.encrypt(algorithm, plaintext, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.ENCRYPT)) { return Mono.error(new UnsupportedOperationException(String.format("Encrypt Operation is missing permission/not supported for key with id %s", key.kid()))); } return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, iv, authenticationData, context, key); } /** * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a single block of data may be * decrypted, the size of this block is dependent on the target key and the algorithm to be used. The decrypt operation * is supported for both asymmetric and symmetric keys. This operation requires the keys/decrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Decrypts the encrypted content. 
Subscribes to the call asynchronously and prints out the decrypted content details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.decrypt * * @param algorithm The algorithm to be used for decryption. * @param cipherText The content to be decrypted. * @throws ResourceNotFoundException if the key cannot be found for decryption. * @throws NullPointerException if {@code algorithm} or {@code cipherText} is null. * @return A {@link Mono} containing the decrypted blob. */ public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] cipherText) { return withContext(context -> decrypt(algorithm, cipherText, null, null, null, context)); } /** * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a single block of data may be * decrypted, the size of this block is dependent on the target key and the algorithm to be used. The decrypt operation * is supported for both asymmetric and symmetric keys. This operation requires the keys/decrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.decrypt * * @param algorithm The algorithm to be used for decryption. * @param cipherText The content to be decrypted. * @param iv The initialization vector. * @param authenticationData The authentication data. * @param authenticationTag The authentication tag. 
* @throws ResourceNotFoundException if the key cannot be found for decryption. * @throws NullPointerException if {@code algorithm} or {@code cipherText} is null. * @return A {@link Mono} containing the decrypted blob. */ public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] cipherText, byte[] iv, byte[] authenticationData, byte[] authenticationTag) { return withContext(context -> decrypt(algorithm, cipherText, iv, authenticationData, authenticationTag, context)); } Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] cipherText, byte[] iv, byte[] authenticationData, byte[] authenticationTag, Context context) { Objects.requireNonNull(algorithm, "Encryption algorithm cannot be null."); Objects.requireNonNull(cipherText, "Cipher text content to be decrypted cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.decrypt(algorithm, cipherText, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.DECRYPT)) { return Mono.error(new UnsupportedOperationException(String.format("Decrypt Operation is not allowed for key with id %s", key.kid()))); } return localKeyCryptographyClient.decryptAsync(algorithm, cipherText, iv, authenticationData, authenticationTag, context, key); } /** * Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and * symmetric keys. This operation requires the keys/sign permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the signature from the digest. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Sings the digest. 
Subscribes to the call asynchronously and prints out the signature details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.sign} * * @param algorithm The algorithm to use for signing. * @param digest The content from which signature is to be created. * @throws ResourceNotFoundException if the key cannot be found for signing. * @throws NullPointerException if {@code algorithm} or {@code digest} is null. * @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult */ public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) { return withContext(context -> sign(algorithm, digest, context)); } Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) { Objects.requireNonNull(algorithm, "Signature algorithm cannot be null."); Objects.requireNonNull(digest, "Digest content to be signed cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.sign(algorithm, digest, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.SIGN)) { return Mono.error(new UnsupportedOperationException(String.format("Sign Operation is not allowed for key with id %s", key.kid()))); } return localKeyCryptographyClient.signAsync(algorithm, digest, context, key); } /** * Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric keys. * In case of asymmetric keys public portion of the key is used to verify the signature . This operation requires the keys/verify permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the signature. 
Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the verification details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.verify} * * @param algorithm The algorithm to use for signing. * @param digest The content from which signature is to be created. * @param signature The signature to be verified. * @throws ResourceNotFoundException if the key cannot be found for verifying. * @throws NullPointerException if {@code algorithm}, {@code digest} or {@code signature} is null. * @return A {@link Mono} containing a {@link Boolean} indicating the signature verification result. */ public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) { return withContext(context -> verify(algorithm, digest, signature, context)); } Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) { Objects.requireNonNull(algorithm, "Signature algorithm cannot be null."); Objects.requireNonNull(digest, "Digest content cannot be null."); Objects.requireNonNull(signature, "Signature to be verified cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.verify(algorithm, digest, signature, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.VERIFY)) { return Mono.error(new UnsupportedOperationException(String.format("Verify Operation is not allowed for key with id %s", key.kid()))); } return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key); } /** * Wraps a symmetric key using the configured key. 
The wrap operation supports wrapping a symmetric key with both * symmetric and asymmetric keys. This operation requires the keys/wrapKey permission. * * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified key content. Possible values include: * {@link KeyWrapAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.wrap-key} * * @param algorithm The encryption algorithm to use for wrapping the key. * @param key The key content to be wrapped * @throws ResourceNotFoundException if the key cannot be found for wrap operation. * @throws NullPointerException if {@code algorithm} or {@code key} is null. * @return A {@link Mono} containing a {@link KeyWrapResult} whose {@link KeyWrapResult */ public Mono<KeyWrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) { return withContext(context -> wrapKey(algorithm, key, context)); } Mono<KeyWrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) { Objects.requireNonNull(algorithm, "Key Wrap algorithm cannot be null."); Objects.requireNonNull(key, "Key content to be wrapped cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.wrapKey(algorithm, key, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.WRAP_KEY)) { return Mono.error(new UnsupportedOperationException(String.format("Wrap Key Operation is not allowed for key with id %s", this.key.kid()))); } return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key); } /** * Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation is the reverse of the wrap operation. 
     * The unwrap operation supports asymmetric and symmetric keys to unwrap. This operation requires the keys/unwrapKey permission.
     *
     * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the specified encrypted key content. Possible values for asymmetric keys include:
     * {@link KeyWrapAlgorithm
     * Possible values for symmetric keys include: {@link KeyWrapAlgorithm
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when a response has been received.</p>
     * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.unwrap-key}
     *
     * @param algorithm The encryption algorithm to use for wrapping the key.
     * @param encryptedKey The encrypted key content to unwrap.
     * @throws ResourceNotFoundException if the key cannot be found for wrap operation.
     * @throws NullPointerException if {@code algorithm} or {@code encryptedKey} is null.
     * @return A {@link Mono} containing the unwrapped key content.
*/ public Mono<KeyUnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) { return withContext(context -> unwrapKey(algorithm, encryptedKey, context)); } Mono<KeyUnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) { Objects.requireNonNull(algorithm, "Key Wrap algorithm cannot be null."); Objects.requireNonNull(encryptedKey, "Encrypted key content to be unwrapped cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.WRAP_KEY)) { return Mono.error(new UnsupportedOperationException(String.format("Unwrap Key Operation is not allowed for key with id %s", this.key.kid()))); } return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key); } /** * Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric and * symmetric keys. This operation requires the keys/sign permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.sign-data} * * @param algorithm The algorithm to use for signing. * @param data The content from which signature is to be created. * @throws ResourceNotFoundException if the key cannot be found for signing. * @throws NullPointerException if {@code algorithm} or {@code data} is null. 
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult */ public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) { return withContext(context -> signData(algorithm, data, context)); } Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) { Objects.requireNonNull(algorithm, "Signature algorithm cannot be null."); Objects.requireNonNull(data, "Data to be signed cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.signData(algorithm, data, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.SIGN)) { return Mono.error(new UnsupportedOperationException(String.format("Sign Operation is not allowed for key with id %s", this.key.kid()))); } return localKeyCryptographyClient.signDataAsync(algorithm, data, context, key); } /** * Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric keys and asymmetric keys. * In case of asymmetric keys public portion of the key is used to verify the signature . This operation requires the keys/verify permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the signature. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the verification details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.verify-data} * * @param algorithm The algorithm to use for signing. * @param data The raw content against which signature is to be verified. * @param signature The signature to be verified. 
* @throws ResourceNotFoundException if the key cannot be found for verifying. * @throws NullPointerException if {@code algorithm}, {@code data} or {@code signature} is null. * @return The {@link Boolean} indicating the signature verification result. */ public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) { return withContext(context -> verifyData(algorithm, data, signature, context)); } Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) { Objects.requireNonNull(algorithm, "Signature algorithm cannot be null."); Objects.requireNonNull(data, "Data cannot be null."); Objects.requireNonNull(signature, "Signature to be verified cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.verifyData(algorithm, data, signature, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.VERIFY)) { return Mono.error(new UnsupportedOperationException(String.format("Verify Operation is not allowed for key with id %s", this.key.kid()))); } return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key); } private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) { return operations.contains(keyOperation); } private boolean ensureValidKeyAvailable() { boolean keyAvailableLocally = true; if (this.key == null) { try { this.key = getKey().block().keyMaterial(); keyAvailableLocally = this.key.isValid(); initializeCryptoClients(); } catch (HttpResponseException | NullPointerException e) { logger.info("Failed to retrieve key from key vault"); keyAvailableLocally = false; } } return keyAvailableLocally; } CryptographyServiceClient getCryptographyServiceClient() { return cryptographyServiceClient; } }
// NOTE(review): orphaned fragment — "because the return type is void." — appears to be the tail
// of an explanation for surfacing errors via Mono.error rather than throwing; confirm intent
// against the change history and either complete or remove this note.
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context, byte[] iv, byte[] authenticationData) { Objects.requireNonNull(algorithm); boolean keyAvailableLocally = ensureValidKeyAvailable(); if(!keyAvailableLocally) { return cryptographyServiceClient.encrypt(algorithm, plaintext, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.ENCRYPT)){ return Mono.error(new UnsupportedOperationException(String.format("Encrypt Operation is not supported for key with id %s", key.kid()))); } switch(key.kty()){ case RSA: case RSA_HSM: return rsaKeyCryptographyClient.encryptAsync(algorithm, plaintext, iv, authenticationData, context, key); case EC: case EC_HSM: return ecKeyCryptographyClient.encryptAsync(algorithm, plaintext, iv, authenticationData, context, key); case OCT: return symmetricKeyCryptographyClient.encryptAsync(algorithm, plaintext, iv, authenticationData, context, key); default: throw new UnsupportedOperationException(String.format("Encrypt Async is not allowed for Key Type: %s", key.kty().toString())); } }
throw new UnsupportedOperationException(String.format("Encrypt Async is not allowed for Key Type: %s", key.kty().toString()));
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context, byte[] iv, byte[] authenticationData) { Objects.requireNonNull(algorithm, "Encryption algorithm cannot be null."); Objects.requireNonNull(plaintext, "Plain text content to be encrypted cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.encrypt(algorithm, plaintext, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.ENCRYPT)) { return Mono.error(new UnsupportedOperationException(String.format("Encrypt Operation is missing permission/not supported for key with id %s", key.kid()))); } return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, iv, authenticationData, context, key); }
// Asynchronous client for Azure Key Vault cryptography operations (encrypt/decrypt,
// sign/verify, wrap/unwrap), preferring local operations when the key material allows it.
class CryptographyAsyncClient {
    // Key material; may be fetched lazily from the vault when constructed from a kid only.
    private JsonWebKey key;
    // REST proxy for the Key Vault cryptography service.
    private CryptographyService service;
    // Key version parsed from the key identifier (see unpackAndValidateId).
    private String version;
    // Per-key-type local cryptography clients; only the one matching key.kty() is initialized.
    private EcKeyCryptographyClient ecKeyCryptographyClient;
    private RsaKeyCryptographyClient rsaKeyCryptographyClient;
    private CryptographyServiceClient cryptographyServiceClient;
    private SymmetricKeyCryptographyClient symmetricKeyCryptographyClient;
    private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class);

    /**
     * Creates a CryptographyAsyncClient that uses {@code pipeline} to service requests
     *
     * @param key the JsonWebKey to use for cryptography operations.
     * @param pipeline HttpPipeline that the HTTP requests and responses flow through.
     */
    CryptographyAsyncClient(JsonWebKey key, HttpPipeline pipeline) {
        Objects.requireNonNull(key);
        this.key = key;
        service = RestProxy.create(CryptographyService.class, pipeline);
        // NOTE(review): when the key has no kid, cryptographyServiceClient stays null —
        // presumably service fallback is never needed for a fully-local key; confirm callers.
        if (!Strings.isNullOrEmpty(key.kid())) {
            unpackAndValidateId(key.kid());
            cryptographyServiceClient = new CryptographyServiceClient(key.kid(), service);
        }
        initializeCryptoClients();
    }

    /**
     * Creates a CryptographyAsyncClient that uses {@code pipeline} to service requests
     *
     * @param kid THe Azure Key vault key identifier to use for cryptography operations.
     * @param pipeline HttpPipeline that the HTTP requests and responses flow through.
*/ CryptographyAsyncClient(String kid, HttpPipeline pipeline) { unpackAndValidateId(kid); service = RestProxy.create(CryptographyService.class, pipeline); cryptographyServiceClient = new CryptographyServiceClient(kid, service); ecKeyCryptographyClient = new EcKeyCryptographyClient(cryptographyServiceClient); rsaKeyCryptographyClient = new RsaKeyCryptographyClient(cryptographyServiceClient); symmetricKeyCryptographyClient = new SymmetricKeyCryptographyClient(cryptographyServiceClient); } private void initializeCryptoClients() { switch(key.kty()){ case RSA: case RSA_HSM: rsaKeyCryptographyClient = new RsaKeyCryptographyClient(key, cryptographyServiceClient); break; case EC: case EC_HSM: ecKeyCryptographyClient = new EcKeyCryptographyClient(key, cryptographyServiceClient); break; case OCT: symmetricKeyCryptographyClient = new SymmetricKeyCryptographyClient(key, cryptographyServiceClient); break; default: throw new IllegalArgumentException(String.format("The Json Web Key Type: %s is not supported.", key.kty().toString())); } } /** * Gets the public part of the configured key. The get key operation is applicable to all key types and it requires the {@code keys/get} permission. * * @throws ResourceNotFoundException when the configured key doesn't exist in the key vault. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Key>> getKey() { return withContext(context -> getKey(context)); } Mono<Response<Key>> getKey(Context context) { return cryptographyServiceClient.getKey(context); } /** * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports a * single block of data, the size of which is dependent on the target key and the encryption algorithm to be used. The encrypt * operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys public portion of the key is used * for encryption. 
This operation requires the keys/encrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * @param algorithm The algorithm to be used for encryption. * @param plaintext The content to be encrypted. * @throws ResourceNotFoundException if the key cannot be found for encryption. * @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult */ public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) { return withContext(context -> encrypt(algorithm, plaintext, context, null, null)); } /** * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports a * single block of data, the size of which is dependent on the target key and the encryption algorithm to be used. The encrypt * operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys public portion of the key is used * for encryption. This operation requires the keys/encrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * @param algorithm The algorithm to be used for encryption. * @param plaintext The content to be encrypted. * @param iv The initialization vector * @param authenticationData The authentication data * @throws ResourceNotFoundException if the key cannot be found for encryption. 
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult */ public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, byte[] iv, byte[] authenticationData) { return withContext(context -> encrypt(algorithm, plaintext, context, iv, authenticationData)); } /** * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a single block of data may be * decrypted, the size of this block is dependent on the target key and the algorithm to be used. The decrypt operation * is supported for both asymmetric and symmetric keys. This operation requires the keys/decrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * @param algorithm The algorithm to be used for decryption. * @param cipherText The content to be decrypted. * @throws ResourceNotFoundException if the key cannot be found for decryption. * @return A {@link Mono} containing the decrypted blob. */ public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] cipherText) { return withContext(context -> decrypt(algorithm, cipherText, null, null, null, context)); } /** * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a single block of data may be * decrypted, the size of this block is dependent on the target key and the algorithm to be used. The decrypt operation * is supported for both asymmetric and symmetric keys. This operation requires the keys/decrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. 
Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * @param algorithm The algorithm to be used for decryption. * @param cipherText The content to be decrypted. * @throws ResourceNotFoundException if the key cannot be found for decryption. * @return A {@link Mono} containing the decrypted blob. */ public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] cipherText, byte[] iv, byte[] authenticationData, byte[] authenticationTag) { return withContext(context -> decrypt(algorithm, cipherText, iv, authenticationData, authenticationTag, context)); } Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] cipherText, byte[] iv, byte[] authenticationData, byte[] authenticationTag, Context context) { Objects.requireNonNull(algorithm); boolean keyAvailableLocally = ensureValidKeyAvailable(); if(!keyAvailableLocally) { return cryptographyServiceClient.decrypt(algorithm, cipherText, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.DECRYPT)){ return Mono.error(new UnsupportedOperationException(String.format("Decrypt Operation is not allowed for key with id %s", key.kid()))); } switch(key.kty()){ case RSA: case RSA_HSM: return rsaKeyCryptographyClient.decryptAsync(algorithm, cipherText, iv, authenticationData, authenticationTag, context, key); case EC: case EC_HSM: return ecKeyCryptographyClient.decryptAsync(algorithm, cipherText, iv, authenticationData, authenticationTag, context, key); case OCT: return symmetricKeyCryptographyClient.decryptAsync(algorithm, cipherText, iv, authenticationData, authenticationTag, context, key); default: return Mono.error(new UnsupportedOperationException(String.format("Decrypt operation is not supported for Key Type: %s", key.kty().toString()))); } } /** * Creates a signature from a digest using the configured key. 
The sign operation supports both asymmetric and * symmetric keys. This operation requires the keys/sign permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the signature from the digest. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * @param algorithm The algorithm to use for signing. * @param digest The content from which signature is to be created. * @throws ResourceNotFoundException if the key cannot be found for signing. * @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult */ public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) { return withContext(context -> sign(algorithm, digest, context)); } Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) { Objects.requireNonNull(algorithm); boolean keyAvailableLocally = ensureValidKeyAvailable(); if(!keyAvailableLocally) { return cryptographyServiceClient.sign(algorithm, digest, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.SIGN)){ return Mono.error(new UnsupportedOperationException(String.format("Sign Operation is not allowed for key with id %s", key.kid()))); } switch(this.key.kty()){ case RSA: case RSA_HSM: return rsaKeyCryptographyClient.signAsync(algorithm, digest, context, key); case EC: case EC_HSM: return ecKeyCryptographyClient.signAsync(algorithm, digest, context, key); case OCT: return symmetricKeyCryptographyClient.signAsync(algorithm, digest, context, key); default: return Mono.error(new UnsupportedOperationException(String.format("Sign operaiton is not supported for Key Type: %s", key.kty().toString()))); } } /** * Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric keys. * In case of asymmetric keys public portion of the key is used to verify the signature . 
This operation requires the keys/verify permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the signature. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * @param algorithm The algorithm to use for signing. * @param digest The content from which signature is to be created. * @param signature The signature to be verified. * @throws ResourceNotFoundException if the key cannot be found for verifying. * @return A {@link Mono} containing a {@link Boolean} indicating the signature verification result. */ public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) { return withContext(context -> verify(algorithm, digest, signature, context)); } Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) { boolean keyAvailableLocally = ensureValidKeyAvailable(); if(!keyAvailableLocally) { return cryptographyServiceClient.verify(algorithm, digest, signature, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.VERIFY)){ return Mono.error(new UnsupportedOperationException(String.format("Verify Operation is not allowed for key with id %s", key.kid()))); } switch(this.key.kty()){ case RSA: case RSA_HSM: return rsaKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key); case EC: case EC_HSM: return ecKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key); case OCT: return symmetricKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key); default: return Mono.error(new UnsupportedOperationException(String.format("Verify operation is not supported for Key Type: %s", key.kty().toString()))); } } /** * Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both * symmetric and asymmetric keys. 
This operation requires the keys/wrapKey permission. * * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified key content. Possible values include: * {@link KeyWrapAlgorithm * * @param algorithm The encryption algorithm to use for wrapping the key. * @param key The key content to be wrapped * @throws ResourceNotFoundException if the key cannot be found for wrap operation. * @return A {@link Mono} containing a {@link KeyWrapResult} whose {@link KeyWrapResult */ public Mono<KeyWrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) { return withContext(context -> wrapKey(algorithm, key, context)); } Mono<KeyWrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) { boolean keyAvailableLocally = ensureValidKeyAvailable(); if(!keyAvailableLocally) { return cryptographyServiceClient.wrapKey(algorithm, key, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.WRAP_KEY)){ return Mono.error(new UnsupportedOperationException(String.format("Wrap Key Operation is not allowed for key with id %s", this.key.kid()))); } switch(this.key.kty()){ case RSA: case RSA_HSM: return rsaKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key); case EC: case EC_HSM: return ecKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key); case OCT: return symmetricKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key); default: return Mono.error(new UnsupportedOperationException(String.format("Encrypt Async is not supported for Key Type: %s", this.key.kty().toString()))); } } /** * Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation is the reverse of the wrap operation. * The unwrap operation supports asymmetric and symmetric keys to unwrap. This operation requires the keys/unwrapKey permission. 
* * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified key content. Possible values for asymmetric keys include: * {@link KeyWrapAlgorithm * Possible values for symmetric keys include: {@link KeyWrapAlgorithm * * @param algorithm The encryption algorithm to use for wrapping the key. * @param encryptedKey The encrypted key content to unwrap. * @throws ResourceNotFoundException if the key cannot be found for wrap operation. * @return A {@link Mono} containing a the unwrapped key content. */ public Mono<KeyUnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) { return withContext(context -> unwrapKey(algorithm, encryptedKey, context)); } Mono<KeyUnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) { Objects.requireNonNull(algorithm); boolean keyAvailableLocally = ensureValidKeyAvailable(); if(!keyAvailableLocally) { return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.WRAP_KEY)){ return Mono.error(new UnsupportedOperationException(String.format("Unwrap Key Operation is not allowed for key with id %s", this.key.kid()))); } switch(this.key.kty()){ case RSA: case RSA_HSM: return rsaKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key); case EC: case EC_HSM: return ecKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key); case OCT: return symmetricKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key); default: return Mono.error(new UnsupportedOperationException(String.format("Encrypt Async is not supported for Key Type: %s", key.kty().toString()))); } } /** * Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric and * symmetric keys. This operation requires the keys/sign permission. 
* * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the signature from the digest. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * @param algorithm The algorithm to use for signing. * @param data The content from which signature is to be created. * @throws ResourceNotFoundException if the key cannot be found for signing. * @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult */ public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) { return withContext(context -> signData(algorithm, data, context)); } Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) { Objects.requireNonNull(algorithm); boolean keyAvailableLocally = ensureValidKeyAvailable(); if(!keyAvailableLocally) { return cryptographyServiceClient.signData(algorithm, data, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.SIGN)){ return Mono.error(new UnsupportedOperationException(String.format("Sign Operation is not allowed for key with id %s", this.key.kid()))); } switch(this.key.kty()){ case RSA: case RSA_HSM: return rsaKeyCryptographyClient.signDataAsync(algorithm, data, context, key); case EC: case EC_HSM: return ecKeyCryptographyClient.signDataAsync(algorithm, data, context, key); default: return Mono.error(new UnsupportedOperationException(String.format("Encrypt Async is not supported for Key Type: %s", key.kty().toString()))); } } /** * Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric keys and asymmetric keys. * In case of asymmetric keys public portion of the key is used to verify the signature . This operation requires the keys/verify permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the signature. 
Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * @param algorithm The algorithm to use for signing. * @param data The raw content against which signature is to be verified. * @param signature The signature to be verified. * @throws ResourceNotFoundException if the key cannot be found for verifying. * @return The {@link Boolean} indicating the signature verification result. */ public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) { return withContext(context -> verifyData(algorithm, data, signature, context)); } Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) { Objects.requireNonNull(algorithm); boolean keyAvailableLocally = ensureValidKeyAvailable(); if(!keyAvailableLocally) { return cryptographyServiceClient.verifyData(algorithm, data, signature, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.VERIFY)){ return Mono.error(new UnsupportedOperationException(String.format("Verify Operation is not allowed for key with id %s", this.key.kid()))); } switch(this.key.kty()){ case RSA: case RSA_HSM: return rsaKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key); case EC: case EC_HSM: return ecKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key); default: return Mono.error(new UnsupportedOperationException(String.format("Encrypt Async is not supported for Key Type: %s", key.kty().toString()))); } } private void unpackAndValidateId(String keyId) { if (keyId != null && keyId.length() > 0) { try { URL url = new URL(keyId); String[] tokens = url.getPath().split("/"); String endpoint = url.getProtocol() + ": String keyName = (tokens.length >= 3 ? tokens[2] : null); version = (tokens.length >= 4 ? 
tokens[3] : null); if(Strings.isNullOrEmpty(endpoint)) { throw new IllegalArgumentException("Key endpoint in key id is invalid"); } else if (Strings.isNullOrEmpty(keyName)) { throw new IllegalArgumentException("Key name in key id is invalid"); } else if(Strings.isNullOrEmpty(version)) { throw new IllegalArgumentException("Key version in key id is invalid"); } } catch (MalformedURLException e) { e.printStackTrace(); } } else { throw new IllegalArgumentException("Key Id is invalid"); } } private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) { if (operations.contains(keyOperation)) { return true; } return false; } private boolean ensureValidKeyAvailable() { boolean keyAvailableLocally = true; if(key == null) { try { this.key = getKey().block().value().keyMaterial(); keyAvailableLocally = this.key.isValid(); } catch (HttpResponseException e) { logger.info("Failed to retrieve key from key vault"); keyAvailableLocally = false; } } return keyAvailableLocally; } }
class CryptographyAsyncClient { static final String KEY_VAULT_SCOPE = "https: private JsonWebKey key; private final CryptographyService service; private final CryptographyServiceClient cryptographyServiceClient; private LocalKeyCryptographyClient localKeyCryptographyClient; private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class); /** * Creates a CryptographyAsyncClient that uses {@code pipeline} to service requests * * @param key the JsonWebKey to use for cryptography operations. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. */ CryptographyAsyncClient(JsonWebKey key, HttpPipeline pipeline) { Objects.requireNonNull(key); if (!key.isValid()) { throw new IllegalArgumentException("Json Web Key is not valid"); } if (key.keyOps() == null) { throw new IllegalArgumentException("Json Web Key's key operations property is not configured"); } if (key.kty() == null) { throw new IllegalArgumentException("Json Web Key's key type property is not configured"); } this.key = key; service = RestProxy.create(CryptographyService.class, pipeline); if (!Strings.isNullOrEmpty(key.kid())) { unpackAndValidateId(key.kid()); cryptographyServiceClient = new CryptographyServiceClient(key.kid(), service); } else { cryptographyServiceClient = null; } initializeCryptoClients(); } /** * Creates a CryptographyAsyncClient that uses {@code pipeline} to service requests * * @param kid THe Azure Key vault key identifier to use for cryptography operations. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. 
*/ CryptographyAsyncClient(String kid, HttpPipeline pipeline) { unpackAndValidateId(kid); service = RestProxy.create(CryptographyService.class, pipeline); cryptographyServiceClient = new CryptographyServiceClient(kid, service); this.key = null; } private void initializeCryptoClients() { if (localKeyCryptographyClient != null) { return; } switch (key.kty()) { case RSA: case RSA_HSM: localKeyCryptographyClient = new RsaKeyCryptographyClient(key, cryptographyServiceClient); break; case EC: case EC_HSM: localKeyCryptographyClient = new EcKeyCryptographyClient(key, cryptographyServiceClient); break; case OCT: localKeyCryptographyClient = new SymmetricKeyCryptographyClient(key, cryptographyServiceClient); break; default: throw new IllegalArgumentException(String.format("The Json Web Key Type: %s is not supported.", key.kty().toString())); } } /** * Gets the public part of the configured key. The get key operation is applicable to all key types and it requires the {@code keys/get} permission. * * <p><strong>Code Samples</strong></p> * <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.getKeyWithResponse} * * @throws ResourceNotFoundException when the configured key doesn't exist in the key vault. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Key>> getKeyWithResponse() { return withContext(context -> getKeyWithResponse(context)); } /** * Gets the public part of the configured key. The get key operation is applicable to all key types and it requires the {@code keys/get} permission. * * <p><strong>Code Samples</strong></p> * <p>Gets the configured key in the client. 
Subscribes to the call asynchronously and prints out the returned key details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.getKey} * * @throws ResourceNotFoundException when the configured key doesn't exist in the key vault. * @return A {@link Mono} containing the requested {@link Key key}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Key> getKey() { return getKeyWithResponse().flatMap(FluxUtil::toMono); } Mono<Response<Key>> getKeyWithResponse(Context context) { return cryptographyServiceClient.getKey(context); } /** * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports a * single block of data, the size of which is dependent on the target key and the encryption algorithm to be used. The encrypt * operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys public portion of the key is used * for encryption. This operation requires the keys/encrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the specified {@code plaintext}. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.encrypt * * @param algorithm The algorithm to be used for encryption. * @param plaintext The content to be encrypted. * @throws ResourceNotFoundException if the key cannot be found for encryption. * @throws NullPointerException if {@code algorithm} or {@code plainText} is null. 
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult */ public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) { return withContext(context -> encrypt(algorithm, plaintext, context, null, null)); } /** * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports a * single block of data, the size of which is dependent on the target key and the encryption algorithm to be used. The encrypt * operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys public portion of the key is used * for encryption. This operation requires the keys/encrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the specified {@code plaintext}. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.encrypt * * @param algorithm The algorithm to be used for encryption. * @param plaintext The content to be encrypted. * @param iv The initialization vector * @param authenticationData The authentication data * @throws ResourceNotFoundException if the key cannot be found for encryption. * @throws NullPointerException if {@code algorithm} or {@code plainText} is null. 
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult */ public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, byte[] iv, byte[] authenticationData) { return withContext(context -> encrypt(algorithm, plaintext, context, iv, authenticationData)); } /** * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a single block of data may be * decrypted, the size of this block is dependent on the target key and the algorithm to be used. The decrypt operation * is supported for both asymmetric and symmetric keys. This operation requires the keys/decrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.decrypt * * @param algorithm The algorithm to be used for decryption. * @param cipherText The content to be decrypted. * @throws ResourceNotFoundException if the key cannot be found for decryption. * @throws NullPointerException if {@code algorithm} or {@code cipherText} is null. * @return A {@link Mono} containing the decrypted blob. */ public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] cipherText) { return withContext(context -> decrypt(algorithm, cipherText, null, null, null, context)); } /** * Decrypts a single block of encrypted data using the configured key and specified algorithm. 
Note that only a single block of data may be * decrypted, the size of this block is dependent on the target key and the algorithm to be used. The decrypt operation * is supported for both asymmetric and symmetric keys. This operation requires the keys/decrypt permission. * * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the specified encrypted content. Possible values * for assymetric keys include: {@link EncryptionAlgorithm * Possible values for symmetric keys include: {@link EncryptionAlgorithm * {@link EncryptionAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.decrypt * * @param algorithm The algorithm to be used for decryption. * @param cipherText The content to be decrypted. * @param iv The initialization vector. * @param authenticationData The authentication data. * @param authenticationTag The authentication tag. * @throws ResourceNotFoundException if the key cannot be found for decryption. * @throws NullPointerException if {@code algorithm} or {@code cipherText} is null. * @return A {@link Mono} containing the decrypted blob. 
*/ public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] cipherText, byte[] iv, byte[] authenticationData, byte[] authenticationTag) { return withContext(context -> decrypt(algorithm, cipherText, iv, authenticationData, authenticationTag, context)); } Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] cipherText, byte[] iv, byte[] authenticationData, byte[] authenticationTag, Context context) { Objects.requireNonNull(algorithm, "Encryption algorithm cannot be null."); Objects.requireNonNull(cipherText, "Cipher text content to be decrypted cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.decrypt(algorithm, cipherText, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.DECRYPT)) { return Mono.error(new UnsupportedOperationException(String.format("Decrypt Operation is not allowed for key with id %s", key.kid()))); } return localKeyCryptographyClient.decryptAsync(algorithm, cipherText, iv, authenticationData, authenticationTag, context, key); } /** * Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and * symmetric keys. This operation requires the keys/sign permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the signature from the digest. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.sign} * * @param algorithm The algorithm to use for signing. * @param digest The content from which signature is to be created. 
* @throws ResourceNotFoundException if the key cannot be found for signing. * @throws NullPointerException if {@code algorithm} or {@code digest} is null. * @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult */ public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) { return withContext(context -> sign(algorithm, digest, context)); } Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) { Objects.requireNonNull(algorithm, "Signature algorithm cannot be null."); Objects.requireNonNull(digest, "Digest content to be signed cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.sign(algorithm, digest, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.SIGN)) { return Mono.error(new UnsupportedOperationException(String.format("Sign Operation is not allowed for key with id %s", key.kid()))); } return localKeyCryptographyClient.signAsync(algorithm, digest, context, key); } /** * Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric keys. * In case of asymmetric keys public portion of the key is used to verify the signature . This operation requires the keys/verify permission. * * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the signature. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the verification details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.verify} * * @param algorithm The algorithm to use for signing. 
* @param digest The content from which signature is to be created. * @param signature The signature to be verified. * @throws ResourceNotFoundException if the key cannot be found for verifying. * @throws NullPointerException if {@code algorithm}, {@code digest} or {@code signature} is null. * @return A {@link Mono} containing a {@link Boolean} indicating the signature verification result. */ public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) { return withContext(context -> verify(algorithm, digest, signature, context)); } Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) { Objects.requireNonNull(algorithm, "Signature algorithm cannot be null."); Objects.requireNonNull(digest, "Digest content cannot be null."); Objects.requireNonNull(signature, "Signature to be verified cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.verify(algorithm, digest, signature, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.VERIFY)) { return Mono.error(new UnsupportedOperationException(String.format("Verify Operation is not allowed for key with id %s", key.kid()))); } return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key); } /** * Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both * symmetric and asymmetric keys. This operation requires the keys/wrapKey permission. * * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified key content. Possible values include: * {@link KeyWrapAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Wraps the key content. 
Subscribes to the call asynchronously and prints out the wrapped key details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.wrap-key} * * @param algorithm The encryption algorithm to use for wrapping the key. * @param key The key content to be wrapped * @throws ResourceNotFoundException if the key cannot be found for wrap operation. * @throws NullPointerException if {@code algorithm} or {@code key} is null. * @return A {@link Mono} containing a {@link KeyWrapResult} whose {@link KeyWrapResult */ public Mono<KeyWrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) { return withContext(context -> wrapKey(algorithm, key, context)); } Mono<KeyWrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) { Objects.requireNonNull(algorithm, "Key Wrap algorithm cannot be null."); Objects.requireNonNull(key, "Key content to be wrapped cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.wrapKey(algorithm, key, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.WRAP_KEY)) { return Mono.error(new UnsupportedOperationException(String.format("Wrap Key Operation is not allowed for key with id %s", this.key.kid()))); } return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key); } /** * Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation is the reverse of the wrap operation. * The unwrap operation supports asymmetric and symmetric keys to unwrap. This operation requires the keys/unwrapKey permission. * * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the specified encrypted key content. 
Possible values for asymmetric keys include: * {@link KeyWrapAlgorithm * Possible values for symmetric keys include: {@link KeyWrapAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.unwrap-key} * * @param algorithm The encryption algorithm to use for wrapping the key. * @param encryptedKey The encrypted key content to unwrap. * @throws ResourceNotFoundException if the key cannot be found for wrap operation. * @throws NullPointerException if {@code algorithm} or {@code encryptedKey} is null. * @return A {@link Mono} containing a the unwrapped key content. */ public Mono<KeyUnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) { return withContext(context -> unwrapKey(algorithm, encryptedKey, context)); } Mono<KeyUnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) { Objects.requireNonNull(algorithm, "Key Wrap algorithm cannot be null."); Objects.requireNonNull(encryptedKey, "Encrypted key content to be unwrapped cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.WRAP_KEY)) { return Mono.error(new UnsupportedOperationException(String.format("Unwrap Key Operation is not allowed for key with id %s", this.key.kid()))); } return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key); } /** * Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric and * symmetric keys. This operation requires the keys/sign permission. 
* * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.sign-data} * * @param algorithm The algorithm to use for signing. * @param data The content from which signature is to be created. * @throws ResourceNotFoundException if the key cannot be found for signing. * @throws NullPointerException if {@code algorithm} or {@code data} is null. * @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult */ public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) { return withContext(context -> signData(algorithm, data, context)); } Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) { Objects.requireNonNull(algorithm, "Signature algorithm cannot be null."); Objects.requireNonNull(data, "Data to be signed cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.signData(algorithm, data, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.SIGN)) { return Mono.error(new UnsupportedOperationException(String.format("Sign Operation is not allowed for key with id %s", this.key.kid()))); } return localKeyCryptographyClient.signDataAsync(algorithm, data, context, key); } /** * Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric keys and asymmetric keys. * In case of asymmetric keys public portion of the key is used to verify the signature . This operation requires the keys/verify permission. 
* * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the signature. Possible values include: * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * {@link SignatureAlgorithm * * <p><strong>Code Samples</strong></p> * <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the verification details when a response has been received.</p> * {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.verify-data} * * @param algorithm The algorithm to use for signing. * @param data The raw content against which signature is to be verified. * @param signature The signature to be verified. * @throws ResourceNotFoundException if the key cannot be found for verifying. * @throws NullPointerException if {@code algorithm}, {@code data} or {@code signature} is null. * @return The {@link Boolean} indicating the signature verification result. */ public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) { return withContext(context -> verifyData(algorithm, data, signature, context)); } Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) { Objects.requireNonNull(algorithm, "Signature algorithm cannot be null."); Objects.requireNonNull(data, "Data cannot be null."); Objects.requireNonNull(signature, "Signature to be verified cannot be null."); boolean keyAvailableLocally = ensureValidKeyAvailable(); if (!keyAvailableLocally) { return cryptographyServiceClient.verifyData(algorithm, data, signature, context); } if (!checkKeyPermissions(this.key.keyOps(), KeyOperation.VERIFY)) { return Mono.error(new UnsupportedOperationException(String.format("Verify Operation is not allowed for key with id %s", this.key.kid()))); } return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key); } private void 
unpackAndValidateId(String keyId) { if (ImplUtils.isNullOrEmpty(keyId)) { throw new IllegalArgumentException("Key Id is invalid"); } try { URL url = new URL(keyId); String[] tokens = url.getPath().split("/"); String endpoint = url.getProtocol() + ": String keyName = (tokens.length >= 3 ? tokens[2] : null); String version = (tokens.length >= 4 ? tokens[3] : null); if (Strings.isNullOrEmpty(endpoint)) { throw new IllegalArgumentException("Key endpoint in key id is invalid"); } else if (Strings.isNullOrEmpty(keyName)) { throw new IllegalArgumentException("Key name in key id is invalid"); } else if (Strings.isNullOrEmpty(version)) { throw new IllegalArgumentException("Key version in key id is invalid"); } } catch (MalformedURLException e) { throw new IllegalArgumentException("The key identifier is malformed", e); } } private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) { return operations.contains(keyOperation); } private boolean ensureValidKeyAvailable() { boolean keyAvailableLocally = true; if (this.key == null) { try { this.key = getKey().block().keyMaterial(); keyAvailableLocally = this.key.isValid(); initializeCryptoClients(); } catch (HttpResponseException | NullPointerException e) { logger.info("Failed to retrieve key from key vault"); keyAvailableLocally = false; } } return keyAvailableLocally; } CryptographyServiceClient getCryptographyServiceClient() { return cryptographyServiceClient; } }
If ANNOTATION is a child of MODIFIERS, use getFirstToken; it does what this for loop is doing.
/**
 * Checks whether the given class is annotated with {@code @ServiceClient} and, when it is,
 * records the sync/async flavor declared by the annotation for the later naming check.
 *
 * @param classDefToken the CLASS_DEF AST node to inspect
 * @return true if the class carries the {@code @ServiceClient} annotation, false otherwise
 */
private boolean hasServiceClientAnnotation(DetailAST classDefToken) {
    final DetailAST modifiers = classDefToken.findFirstToken(TokenTypes.MODIFIERS);
    // A class may carry several annotations, so walk every child of MODIFIERS.
    DetailAST child = modifiers.getFirstChild();
    while (child != null) {
        if (child.getType() == TokenTypes.ANNOTATION) {
            final DetailAST ident = child.findFirstToken(TokenTypes.IDENT);
            if (ident != null && SERVICE_CLIENT.equals(ident.getText())) {
                // Side effect: remember whether this client is declared asynchronous.
                isAsync = isAsyncServiceClient(child);
                return true;
            }
        }
        child = child.getNextSibling();
    }
    return false;
}
for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) {
/**
 * Determines whether {@code classDefToken} is annotated with {@code @ServiceClient}.
 * Side effect: when the annotation is found, {@code isAsync} is updated from
 * {@code isAsyncServiceClient(ast)} before returning.
 *
 * @param classDefToken the CLASS_DEF AST node
 * @return true if the class is annotated with {@code @ServiceClient}, false otherwise
 */
private boolean hasServiceClientAnnotation(DetailAST classDefToken) {
    final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS);
    // A class may carry several annotations; iterate all MODIFIERS children rather than
    // stopping at the first ANNOTATION token.
    for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) {
        if (ast.getType() != TokenTypes.ANNOTATION) {
            continue;
        }
        final DetailAST annotationIdent = ast.findFirstToken(TokenTypes.IDENT);
        if (annotationIdent != null && SERVICE_CLIENT.equals(annotationIdent.getText())) {
            // Record the declared sync/async flavor for the subsequent naming validation.
            isAsync = isAsyncServiceClient(ast);
            return true;
        }
    }
    return false;
}
class is annotated with @ServiceClient, false otherwise. */
class is annotated with @ServiceClient, false otherwise. */
This check may not trigger when it should, since an async class can false-positive into the check above. I think we should change this a bit by having: ```java if (async name) { async checks; } else if (sync name) { sync checks; } else { naming error } ```
private void checkServiceClientNaming(DetailAST classDefToken) { final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient.", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } if (className.endsWith(ASYNC_CLIENT) && !isAsync) { log(classDefToken, String.format("Asynchronous Client, class ''%s'' must set property ''%s'' to true.", className, IS_ASYNC)); } if (className.endsWith(CLIENT) && !className.endsWith(ASYNC_CLIENT) && isAsync) { log(classDefToken, String.format("Synchronous Client, class ''%s'' must set property''%s'' to false or without the property.", className, IS_ASYNC)); } }
if (className.endsWith(ASYNC_CLIENT) && !isAsync) {
private void checkServiceClientNaming(DetailAST classDefToken) { final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } }
class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */
class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */
Error message says `class must be named AsyncClient` but condition only check if name ends with this string. So, a name like `thisIsMySuperMethodAsyncClient` would be valid right? If yes, update the error message to say that `class name must end with this string.
private void checkServiceClientNaming(DetailAST classDefToken) { final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient.", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } if (className.endsWith(ASYNC_CLIENT) && !isAsync) { log(classDefToken, String.format("Asynchronous Client, class ''%s'' must set property ''%s'' to true.", className, IS_ASYNC)); } if (className.endsWith(CLIENT) && !className.endsWith(ASYNC_CLIENT) && isAsync) { log(classDefToken, String.format("Synchronous Client, class ''%s'' must set property''%s'' to false or without the property.", className, IS_ASYNC)); } }
if (isAsync && !className.endsWith(ASYNC_CLIENT)) {
private void checkServiceClientNaming(DetailAST classDefToken) { final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } }
class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */
class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */
Do we want to make this if statements `else if` statements? Do we need to check all condition even after we find one as true?
private void checkServiceClientNaming(DetailAST classDefToken) { final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient.", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } if (className.endsWith(ASYNC_CLIENT) && !isAsync) { log(classDefToken, String.format("Asynchronous Client, class ''%s'' must set property ''%s'' to true.", className, IS_ASYNC)); } if (className.endsWith(CLIENT) && !className.endsWith(ASYNC_CLIENT) && isAsync) { log(classDefToken, String.format("Synchronous Client, class ''%s'' must set property''%s'' to false or without the property.", className, IS_ASYNC)); } }
if (className.endsWith(CLIENT) && !className.endsWith(ASYNC_CLIENT) && isAsync) {
private void checkServiceClientNaming(DetailAST classDefToken) { final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } }
class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */
class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */
If the class does not have `@ServiceClient` annotation, is it possible to terminate this custom checkstyle evaluation i.e. no more calls to `visitToken()`? If that's possible, you could simplify the other switch cases by not having to check `if (!hasServiceClientAnnotation)`
public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.IMPORT: addImportedClassPath(token); break; case TokenTypes.CLASS_DEF: hasServiceClientAnnotation = hasServiceClientAnnotation(token); if (!hasServiceClientAnnotation) { return; } checkServiceClientNaming(token); break; case TokenTypes.CTOR_DEF: if (!hasServiceClientAnnotation) { return; } checkConstructor(token); break; case TokenTypes.METHOD_DEF: if (!hasServiceClientAnnotation) { return; } checkMethodNameBuilder(token); checkMethodNamingPattern(token); break; case TokenTypes.OBJBLOCK: if (!hasServiceClientAnnotation) { return; } checkClassField(token); break; default: break; } }
if (!hasServiceClientAnnotation) {
public void visitToken(DetailAST token) { if (isImplPackage) { return; } switch (token.getType()) { case TokenTypes.PACKAGE_DEF: String packageName = FullIdent.createFullIdent(token.findFirstToken(TokenTypes.DOT)).getText(); isImplPackage = packageName.contains(".implementation"); break; case TokenTypes.CLASS_DEF: hasServiceClientAnnotation = hasServiceClientAnnotation(token); if (hasServiceClientAnnotation) { checkServiceClientNaming(token); } break; case TokenTypes.CTOR_DEF: if (hasServiceClientAnnotation) { checkConstructor(token); } break; case TokenTypes.METHOD_DEF: if (hasServiceClientAnnotation) { checkMethodName(token); } break; case TokenTypes.OBJBLOCK: if (hasServiceClientAnnotation) { checkClassField(token); } break; default: break; } }
class if it returns a ''sync'' single value."; private static final Set<String> COMMON_NAMING_PREFIX_SET = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "upsert", "set", "create", "update", "replace", "delete", "add", "get", "list" ))); private static boolean isAsync; private static boolean hasServiceClientAnnotation; private final Map<String, String> simpleClassNameToQualifiedNameMap = new HashMap<>(); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class ServiceClientInstantiationCheck extends AbstractCheck { private static final String SERVICE_CLIENT = "ServiceClient"; private static final String BUILDER = "builder"; private static final String ASYNC_CLIENT = "AsyncClient"; private static final String CLIENT = "Client"; private static final String IS_ASYNC = "isAsync"; private static boolean hasServiceClientAnnotation; private static boolean isAsync; private static boolean isImplPackage; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.PACKAGE_DEF, TokenTypes.CLASS_DEF, TokenTypes.CTOR_DEF, TokenTypes.METHOD_DEF, TokenTypes.OBJBLOCK }; } @Override public void beginTree(DetailAST root) { hasServiceClientAnnotation = false; isAsync = false; isImplPackage = false; } @Override /** * Checks if the class is annotated with annotation @ServiceClient. A class could have multiple annotations. * * @param classDefToken the CLASS_DEF AST node * @return true if the class is annotated with @ServiceClient, false otherwise. */ private boolean hasServiceClientAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } final DetailAST annotationIdent = ast.findFirstToken(TokenTypes.IDENT); if (annotationIdent != null && SERVICE_CLIENT.equals(annotationIdent.getText())) { isAsync = isAsyncServiceClient(ast); return true; } } return false; } /** * Checks for public or protected constructor for the service client class. * Log error if the service client has public or protected constructor. 
* * @param ctorToken the CTOR_DEF AST node */ private void checkConstructor(DetailAST ctorToken) { final DetailAST modifiersToken = ctorToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) || accessModifier.equals(AccessModifier.PROTECTED)) { log(modifiersToken, "@ServiceClient class should not have any public or protected constructor."); } } /** * Checks for public static method named 'builder'. Should avoid to use method name, 'builder'. * * @param methodDefToken the METHOD_DEF AST node */ private void checkMethodName(DetailAST methodDefToken) { final DetailAST methodNameToken = methodDefToken.findFirstToken(TokenTypes.IDENT); if (!BUILDER.equals(methodNameToken.getText())) { return; } final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) && modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)) { log(modifiersToken, "@ServiceClient class should not have a public static method named ''builder''."); } } /** * Checks that the field variables in the @ServiceClient are final. ServiceClients should be immutable. * * @param objBlockToken the OBJBLOCK AST node */ private void checkClassField(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF != ast.getType()) { continue; } final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL)) { log(modifiersToken, String.format("The variable field ''%s'' of class ''%s'' should be final. 
Classes annotated with @ServiceClient are supposed to be immutable.", ast.findFirstToken(TokenTypes.IDENT).getText(), objBlockToken.getPreviousSibling().getText())); } } } /** * Checks for the class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */ private void checkServiceClientNaming(DetailAST classDefToken) { final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } } /** * A function checks if the annotation node has a member key is {@code IS_ASYNC} with value equals to 'true'. * If the value equals 'true', which indicates the @ServiceClient is an asynchronous client. * If the member pair is missing. By default, it is a synchronous service client. * * @param annotationToken the ANNOTATION AST node * @return true if the annotation has {@code IS_ASYNC} value 'true', otherwise, false. */ private boolean isAsyncServiceClient(DetailAST annotationToken) { for (DetailAST ast = annotationToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { continue; } final DetailAST identToken = ast.findFirstToken(TokenTypes.IDENT); if (identToken == null) { continue; } if (!IS_ASYNC.equals(identToken.getText())) { continue; } final DetailAST exprToken = ast.findFirstToken(TokenTypes.EXPR); if (exprToken == null) { continue; } return exprToken.branchContains(TokenTypes.LITERAL_TRUE); } return false; } }
I found no way to do an earlier termination of tree traversal,
public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.IMPORT: addImportedClassPath(token); break; case TokenTypes.CLASS_DEF: hasServiceClientAnnotation = hasServiceClientAnnotation(token); if (!hasServiceClientAnnotation) { return; } checkServiceClientNaming(token); break; case TokenTypes.CTOR_DEF: if (!hasServiceClientAnnotation) { return; } checkConstructor(token); break; case TokenTypes.METHOD_DEF: if (!hasServiceClientAnnotation) { return; } checkMethodNameBuilder(token); checkMethodNamingPattern(token); break; case TokenTypes.OBJBLOCK: if (!hasServiceClientAnnotation) { return; } checkClassField(token); break; default: break; } }
if (!hasServiceClientAnnotation) {
public void visitToken(DetailAST token) { if (isImplPackage) { return; } switch (token.getType()) { case TokenTypes.PACKAGE_DEF: String packageName = FullIdent.createFullIdent(token.findFirstToken(TokenTypes.DOT)).getText(); isImplPackage = packageName.contains(".implementation"); break; case TokenTypes.CLASS_DEF: hasServiceClientAnnotation = hasServiceClientAnnotation(token); if (hasServiceClientAnnotation) { checkServiceClientNaming(token); } break; case TokenTypes.CTOR_DEF: if (hasServiceClientAnnotation) { checkConstructor(token); } break; case TokenTypes.METHOD_DEF: if (hasServiceClientAnnotation) { checkMethodName(token); } break; case TokenTypes.OBJBLOCK: if (hasServiceClientAnnotation) { checkClassField(token); } break; default: break; } }
class if it returns a ''sync'' single value."; private static final Set<String> COMMON_NAMING_PREFIX_SET = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "upsert", "set", "create", "update", "replace", "delete", "add", "get", "list" ))); private static boolean isAsync; private static boolean hasServiceClientAnnotation; private final Map<String, String> simpleClassNameToQualifiedNameMap = new HashMap<>(); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class ServiceClientInstantiationCheck extends AbstractCheck { private static final String SERVICE_CLIENT = "ServiceClient"; private static final String BUILDER = "builder"; private static final String ASYNC_CLIENT = "AsyncClient"; private static final String CLIENT = "Client"; private static final String IS_ASYNC = "isAsync"; private static boolean hasServiceClientAnnotation; private static boolean isAsync; private static boolean isImplPackage; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.PACKAGE_DEF, TokenTypes.CLASS_DEF, TokenTypes.CTOR_DEF, TokenTypes.METHOD_DEF, TokenTypes.OBJBLOCK }; } @Override public void beginTree(DetailAST root) { hasServiceClientAnnotation = false; isAsync = false; isImplPackage = false; } @Override /** * Checks if the class is annotated with annotation @ServiceClient. A class could have multiple annotations. * * @param classDefToken the CLASS_DEF AST node * @return true if the class is annotated with @ServiceClient, false otherwise. */ private boolean hasServiceClientAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } final DetailAST annotationIdent = ast.findFirstToken(TokenTypes.IDENT); if (annotationIdent != null && SERVICE_CLIENT.equals(annotationIdent.getText())) { isAsync = isAsyncServiceClient(ast); return true; } } return false; } /** * Checks for public or protected constructor for the service client class. * Log error if the service client has public or protected constructor. 
* * @param ctorToken the CTOR_DEF AST node */ private void checkConstructor(DetailAST ctorToken) { final DetailAST modifiersToken = ctorToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) || accessModifier.equals(AccessModifier.PROTECTED)) { log(modifiersToken, "@ServiceClient class should not have any public or protected constructor."); } } /** * Checks for public static method named 'builder'. Should avoid to use method name, 'builder'. * * @param methodDefToken the METHOD_DEF AST node */ private void checkMethodName(DetailAST methodDefToken) { final DetailAST methodNameToken = methodDefToken.findFirstToken(TokenTypes.IDENT); if (!BUILDER.equals(methodNameToken.getText())) { return; } final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) && modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)) { log(modifiersToken, "@ServiceClient class should not have a public static method named ''builder''."); } } /** * Checks that the field variables in the @ServiceClient are final. ServiceClients should be immutable. * * @param objBlockToken the OBJBLOCK AST node */ private void checkClassField(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF != ast.getType()) { continue; } final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL)) { log(modifiersToken, String.format("The variable field ''%s'' of class ''%s'' should be final. 
Classes annotated with @ServiceClient are supposed to be immutable.", ast.findFirstToken(TokenTypes.IDENT).getText(), objBlockToken.getPreviousSibling().getText())); } } } /** * Checks for the class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */ private void checkServiceClientNaming(DetailAST classDefToken) { final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } } /** * A function checks if the annotation node has a member key is {@code IS_ASYNC} with value equals to 'true'. * If the value equals 'true', which indicates the @ServiceClient is an asynchronous client. * If the member pair is missing. By default, it is a synchronous service client. * * @param annotationToken the ANNOTATION AST node * @return true if the annotation has {@code IS_ASYNC} value 'true', otherwise, false. */ private boolean isAsyncServiceClient(DetailAST annotationToken) { for (DetailAST ast = annotationToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { continue; } final DetailAST identToken = ast.findFirstToken(TokenTypes.IDENT); if (identToken == null) { continue; } if (!IS_ASYNC.equals(identToken.getText())) { continue; } final DetailAST exprToken = ast.findFirstToken(TokenTypes.EXPR); if (exprToken == null) { continue; } return exprToken.branchContains(TokenTypes.LITERAL_TRUE); } return false; } }
What's the reason of removing the null checking?
public ConfigurationClientBuilder httpClient(HttpClient client) { this.httpClient = client; return this; }
this.httpClient = client;
public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private ConfigurationClientCredentials credential; private URL endpoint; private HttpClient httpClient; private HttpLogDetailLevel httpLogDetailLevel; private HttpPipeline pipeline; private RetryPolicy retryPolicy; private Configuration configuration; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { retryPolicy = new RetryPolicy(); httpLogDetailLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. * * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. 
* * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? ConfigurationManager.getConfiguration().clone() : configuration; ConfigurationClientCredentials configurationCredentials = getConfigurationCredentials(buildConfiguration); URL buildEndpoint = getBuildEndpoint(configurationCredentials); Objects.requireNonNull(buildEndpoint); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline); } ConfigurationClientCredentials buildCredential = (credential == null) ? configurationCredentials : credential; if (buildCredential == null) { logger.logAndThrow(new IllegalStateException("'credential' is required.")); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(AzureConfiguration.NAME, AzureConfiguration.VERSION, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); policies.add(new ConfigurationCredentialsPolicy(buildCredential)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogDetailLevel)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline); } /** * Sets the service endpoint for the Azure App Configuration instance. 
* * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. * @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { this.endpoint = new URL(endpoint); } catch (MalformedURLException ex) { logger.logAndThrow(new IllegalArgumentException("'endpoint' must be a valid URL")); } return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param credential The credential to use for authenticating HTTP requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder credential(ConfigurationClientCredentials credential) { this.credential = Objects.requireNonNull(credential); this.endpoint = credential.baseUri(); return this; } /** * Sets the logging level for HTTP requests and responses. * * @param logLevel The amount of logging output when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code logLevel} is {@code null}. */ public ConfigurationClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { httpLogDetailLevel = Objects.requireNonNull(logLevel); return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. 
*/ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated ConfigurationClientBuilder object. */ /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } private ConfigurationClientCredentials getConfigurationCredentials(Configuration configuration) { String connectionString = configuration.get("AZURE_APPCONFIG_CONNECTION_STRING"); if (ImplUtils.isNullOrEmpty(connectionString)) { return credential; } try { return new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException | NoSuchAlgorithmException ex) { return null; } } private URL getBuildEndpoint(ConfigurationClientCredentials buildCredentials) { if (endpoint != null) { return endpoint; } else if (buildCredentials != null) { return buildCredentials.baseUri(); } else { return null; } } }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private ConfigurationClientCredentials credential; private URL endpoint; private HttpClient httpClient; private HttpLogDetailLevel httpLogDetailLevel; private HttpPipeline pipeline; private RetryPolicy retryPolicy; private Configuration configuration; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { retryPolicy = new RetryPolicy(); httpLogDetailLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. * * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. 
* * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? ConfigurationManager.getConfiguration().clone() : configuration; ConfigurationClientCredentials configurationCredentials = getConfigurationCredentials(buildConfiguration); URL buildEndpoint = getBuildEndpoint(configurationCredentials); Objects.requireNonNull(buildEndpoint); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline); } ConfigurationClientCredentials buildCredential = (credential == null) ? configurationCredentials : credential; if (buildCredential == null) { logger.logAndThrow(new IllegalStateException("'credential' is required.")); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(AzureConfiguration.NAME, AzureConfiguration.VERSION, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); policies.add(new ConfigurationCredentialsPolicy(buildCredential)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogDetailLevel)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline); } /** * Sets the service endpoint for the Azure App Configuration instance. 
* * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. * @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { this.endpoint = new URL(endpoint); } catch (MalformedURLException ex) { logger.logAndThrow(new IllegalArgumentException("'endpoint' must be a valid URL")); } return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param credential The credential to use for authenticating HTTP requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder credential(ConfigurationClientCredentials credential) { this.credential = Objects.requireNonNull(credential); this.endpoint = credential.baseUri(); return this; } /** * Sets the logging level for HTTP requests and responses. * * @param logLevel The amount of logging output when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code logLevel} is {@code null}. */ public ConfigurationClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { httpLogDetailLevel = Objects.requireNonNull(logLevel); return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. 
*/ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated ConfigurationClientBuilder object. */ /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. 
*/ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } private ConfigurationClientCredentials getConfigurationCredentials(Configuration configuration) { String connectionString = configuration.get("AZURE_APPCONFIG_CONNECTION_STRING"); if (ImplUtils.isNullOrEmpty(connectionString)) { return credential; } try { return new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException | NoSuchAlgorithmException ex) { return null; } } private URL getBuildEndpoint(ConfigurationClientCredentials buildCredentials) { if (endpoint != null) { return endpoint; } else if (buildCredentials != null) { return buildCredentials.baseUri(); } else { return null; } } }
Instead of making the traversal twice, assign to a local variable. `parameterToken.findFirstToken(TokenTypes.TYPE).findFirstToken(TokenTypes.IDENT)`
private void checkContextInRightPlace(DetailAST methodDefToken) { final DetailAST parametersToken = methodDefToken.findFirstToken(TokenTypes.PARAMETERS); final String returnType = getReturnType(methodDefToken.findFirstToken(TokenTypes.TYPE), new StringBuilder()).toString(); final boolean containsTypeParameter = TokenUtil.findFirstTokenByPredicate(parametersToken, parameterToken -> parameterToken.getType() == TokenTypes.PARAMETER_DEF && parameterToken.findFirstToken(TokenTypes.TYPE).findFirstToken(TokenTypes.IDENT) != null && parameterToken.findFirstToken(TokenTypes.TYPE).findFirstToken(TokenTypes.IDENT) .getText().equals(CONTEXT)) .isPresent(); if (containsTypeParameter) { if (returnType.startsWith(MONO_BRACKET) || returnType.startsWith(PAGED_FLUX_BRACKET)) { log(methodDefToken, String.format(ASYNC_CONTEXT_ERROR, CONTEXT)); } } else { if (returnType.startsWith(RESPONSE_BRACKET) || returnType.startsWith(PAGED_ITERABLE_BRACKET)) { log(methodDefToken, String.format(SYNC_CONTEXT_ERROR, CONTEXT)); } } }
&& parameterToken.findFirstToken(TokenTypes.TYPE).findFirstToken(TokenTypes.IDENT) != null
private void checkContextInRightPlace(DetailAST methodDefToken) { final DetailAST parametersToken = methodDefToken.findFirstToken(TokenTypes.PARAMETERS); final String returnType = getReturnType(methodDefToken.findFirstToken(TokenTypes.TYPE), new StringBuilder()).toString(); final boolean containsContextParameter = TokenUtil.findFirstTokenByPredicate(parametersToken, parameterToken -> { if (parameterToken.getType() != TokenTypes.PARAMETER_DEF) { return false; } final DetailAST paramTypeIdentToken = parameterToken.findFirstToken(TokenTypes.TYPE).findFirstToken(TokenTypes.IDENT); return paramTypeIdentToken != null && CONTEXT.equals(paramTypeIdentToken.getText()); }) .isPresent(); if (containsContextParameter) { if (returnType.startsWith(MONO_BRACKET) || returnType.startsWith(PAGED_FLUX_BRACKET)) { log(methodDefToken, String.format(ASYNC_CONTEXT_ERROR, CONTEXT)); } } else { if (returnType.startsWith(RESPONSE_BRACKET)) { log(methodDefToken, String.format(SYNC_CONTEXT_ERROR, CONTEXT)); } } }
class annotated with @ServiceClient should * follow below rules: * 1) Follows method naming pattern. Refer to Java Spec. * 2) Methods should not have "Async" added to the method name. * 3) The return type of async and sync clients should be as per guidelines: * 3.1) The return type for async collection should be of type? extends PagedFlux. * 3.2) The return type for async single value should be of type? extends Mono. * 3.3) The return type for sync collection should be of type? extends PagedIterable. * 3.4) The return type for sync single value should be of type? extends Response. * 4) Naming pattern for 'WithResponse'. * 5) Synchronous method with annotation @ServiceMethod has to have {@code Context}
class annotated with @ServiceClient should * follow below rules: * 1) Follows method naming pattern. Refer to Java Spec. * 2) Methods should not have "Async" added to the method name. * 3) The return type of async and sync clients should be as per guidelines: * 3.1) The return type for async collection should be of type? extends PagedFlux. * 3.2) The return type for async single value should be of type? extends Mono. * 3.3) The return type for sync collection should be of type? extends PagedIterable. * 3.4) The return type for sync single value should be of type? extends Response. * 4) Naming pattern for 'WithResponse'. * 5) Synchronous method with annotation @ServiceMethod has to have {@code Context}
`containsTypeParameter` is a misleading name. I spent a minute trying to understand if the code was wrong because it wasn't looking for a type parameter. You're looking to see if it contains a parameter of type Context. `containsContextParameter` is better.
private void checkContextInRightPlace(DetailAST methodDefToken) { final DetailAST parametersToken = methodDefToken.findFirstToken(TokenTypes.PARAMETERS); final String returnType = getReturnType(methodDefToken.findFirstToken(TokenTypes.TYPE), new StringBuilder()).toString(); final boolean containsTypeParameter = TokenUtil.findFirstTokenByPredicate(parametersToken, parameterToken -> parameterToken.getType() == TokenTypes.PARAMETER_DEF && parameterToken.findFirstToken(TokenTypes.TYPE).findFirstToken(TokenTypes.IDENT) != null && parameterToken.findFirstToken(TokenTypes.TYPE).findFirstToken(TokenTypes.IDENT) .getText().equals(CONTEXT)) .isPresent(); if (containsTypeParameter) { if (returnType.startsWith(MONO_BRACKET) || returnType.startsWith(PAGED_FLUX_BRACKET)) { log(methodDefToken, String.format(ASYNC_CONTEXT_ERROR, CONTEXT)); } } else { if (returnType.startsWith(RESPONSE_BRACKET) || returnType.startsWith(PAGED_ITERABLE_BRACKET)) { log(methodDefToken, String.format(SYNC_CONTEXT_ERROR, CONTEXT)); } } }
final boolean containsTypeParameter = TokenUtil.findFirstTokenByPredicate(parametersToken,
private void checkContextInRightPlace(DetailAST methodDefToken) { final DetailAST parametersToken = methodDefToken.findFirstToken(TokenTypes.PARAMETERS); final String returnType = getReturnType(methodDefToken.findFirstToken(TokenTypes.TYPE), new StringBuilder()).toString(); final boolean containsContextParameter = TokenUtil.findFirstTokenByPredicate(parametersToken, parameterToken -> { if (parameterToken.getType() != TokenTypes.PARAMETER_DEF) { return false; } final DetailAST paramTypeIdentToken = parameterToken.findFirstToken(TokenTypes.TYPE).findFirstToken(TokenTypes.IDENT); return paramTypeIdentToken != null && CONTEXT.equals(paramTypeIdentToken.getText()); }) .isPresent(); if (containsContextParameter) { if (returnType.startsWith(MONO_BRACKET) || returnType.startsWith(PAGED_FLUX_BRACKET)) { log(methodDefToken, String.format(ASYNC_CONTEXT_ERROR, CONTEXT)); } } else { if (returnType.startsWith(RESPONSE_BRACKET)) { log(methodDefToken, String.format(SYNC_CONTEXT_ERROR, CONTEXT)); } } }
class annotated with @ServiceClient should * follow below rules: * 1) Follows method naming pattern. Refer to Java Spec. * 2) Methods should not have "Async" added to the method name. * 3) The return type of async and sync clients should be as per guidelines: * 3.1) The return type for async collection should be of type? extends PagedFlux. * 3.2) The return type for async single value should be of type? extends Mono. * 3.3) The return type for sync collection should be of type? extends PagedIterable. * 3.4) The return type for sync single value should be of type? extends Response. * 4) Naming pattern for 'WithResponse'. * 5) Synchronous method with annotation @ServiceMethod has to have {@code Context}
class annotated with @ServiceClient should * follow below rules: * 1) Follows method naming pattern. Refer to Java Spec. * 2) Methods should not have "Async" added to the method name. * 3) The return type of async and sync clients should be as per guidelines: * 3.1) The return type for async collection should be of type? extends PagedFlux. * 3.2) The return type for async single value should be of type? extends Mono. * 3.3) The return type for sync collection should be of type? extends PagedIterable. * 3.4) The return type for sync single value should be of type? extends Response. * 4) Naming pattern for 'WithResponse'. * 5) Synchronous method with annotation @ServiceMethod has to have {@code Context}
It's nice to have expression lambdas, but using `node.findFirstToken(TokenTypes.IDENT)` results in evaluating the same node twice. Assign to a local variable.
private boolean hasServiceClientAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); final Optional<DetailAST> serviceClientAnnotationOption = TokenUtil.findFirstTokenByPredicate(modifiersToken, node -> node.getType() == TokenTypes.ANNOTATION && node.findFirstToken(TokenTypes.IDENT) != null && SERVICE_CLIENT.equals(node.findFirstToken(TokenTypes.IDENT).getText())); if (serviceClientAnnotationOption.isPresent()) { isAsync = isAsyncServiceClient(serviceClientAnnotationOption.get()); return true; } return false; }
node -> node.getType() == TokenTypes.ANNOTATION && node.findFirstToken(TokenTypes.IDENT) != null
private boolean hasServiceClientAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); final Optional<DetailAST> serviceClientAnnotationOption = TokenUtil.findFirstTokenByPredicate(modifiersToken, node -> { if (node.getType() != TokenTypes.ANNOTATION) { return false; } final DetailAST annotationIdentToken = node.findFirstToken(TokenTypes.IDENT); return annotationIdentToken != null && SERVICE_CLIENT.equals(annotationIdentToken.getText()); } ); if (serviceClientAnnotationOption.isPresent()) { isAsync = isAsyncServiceClient(serviceClientAnnotationOption.get()); return true; } return false; }
class is annotated with @ServiceClient, false otherwise. */
class is annotated with @ServiceClient, false otherwise. */
What's the justification of not null here? I think make a default one or silence it would be better.
public ConfigurationClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { httpLogDetailLevel = Objects.requireNonNull(logLevel); return this; }
httpLogDetailLevel = Objects.requireNonNull(logLevel);
public ConfigurationClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { httpLogDetailLevel = Objects.requireNonNull(logLevel); return this; }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private ConfigurationClientCredentials credential; private URL endpoint; private HttpClient httpClient; private HttpLogDetailLevel httpLogDetailLevel; private HttpPipeline pipeline; private RetryPolicy retryPolicy; private Configuration configuration; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { retryPolicy = new RetryPolicy(); httpLogDetailLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. * * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. 
* * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? ConfigurationManager.getConfiguration().clone() : configuration; ConfigurationClientCredentials configurationCredentials = getConfigurationCredentials(buildConfiguration); URL buildEndpoint = getBuildEndpoint(configurationCredentials); Objects.requireNonNull(buildEndpoint); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline); } ConfigurationClientCredentials buildCredential = (credential == null) ? configurationCredentials : credential; if (buildCredential == null) { logger.logAndThrow(new IllegalStateException("'credential' is required.")); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(AzureConfiguration.NAME, AzureConfiguration.VERSION, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); policies.add(new ConfigurationCredentialsPolicy(buildCredential)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogDetailLevel)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline); } /** * Sets the service endpoint for the Azure App Configuration instance. 
* * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. * @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { this.endpoint = new URL(endpoint); } catch (MalformedURLException ex) { logger.logAndThrow(new IllegalArgumentException("'endpoint' must be a valid URL")); } return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param credential The credential to use for authenticating HTTP requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder credential(ConfigurationClientCredentials credential) { this.credential = Objects.requireNonNull(credential); this.endpoint = credential.baseUri(); return this; } /** * Sets the logging level for HTTP requests and responses. * * @param logLevel The amount of logging output when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code logLevel} is {@code null}. */ /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. 
* @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } private ConfigurationClientCredentials getConfigurationCredentials(Configuration configuration) { String connectionString = configuration.get("AZURE_APPCONFIG_CONNECTION_STRING"); if (ImplUtils.isNullOrEmpty(connectionString)) { return credential; } try { return new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException | NoSuchAlgorithmException ex) { return null; } } private URL getBuildEndpoint(ConfigurationClientCredentials buildCredentials) { if (endpoint != null) { return endpoint; } else if (buildCredentials != null) { return buildCredentials.baseUri(); } else { return null; } } }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private ConfigurationClientCredentials credential; private URL endpoint; private HttpClient httpClient; private HttpLogDetailLevel httpLogDetailLevel; private HttpPipeline pipeline; private RetryPolicy retryPolicy; private Configuration configuration; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { retryPolicy = new RetryPolicy(); httpLogDetailLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. * * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. 
* * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? ConfigurationManager.getConfiguration().clone() : configuration; ConfigurationClientCredentials configurationCredentials = getConfigurationCredentials(buildConfiguration); URL buildEndpoint = getBuildEndpoint(configurationCredentials); Objects.requireNonNull(buildEndpoint); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline); } ConfigurationClientCredentials buildCredential = (credential == null) ? configurationCredentials : credential; if (buildCredential == null) { logger.logAndThrow(new IllegalStateException("'credential' is required.")); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(AzureConfiguration.NAME, AzureConfiguration.VERSION, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); policies.add(new ConfigurationCredentialsPolicy(buildCredential)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogDetailLevel)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline); } /** * Sets the service endpoint for the Azure App Configuration instance. 
* * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. * @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { this.endpoint = new URL(endpoint); } catch (MalformedURLException ex) { logger.logAndThrow(new IllegalArgumentException("'endpoint' must be a valid URL")); } return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param credential The credential to use for authenticating HTTP requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder credential(ConfigurationClientCredentials credential) { this.credential = Objects.requireNonNull(credential); this.endpoint = credential.baseUri(); return this; } /** * Sets the logging level for HTTP requests and responses. * * @param logLevel The amount of logging output when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code logLevel} is {@code null}. */ /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. 
* @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } private ConfigurationClientCredentials getConfigurationCredentials(Configuration configuration) { String connectionString = configuration.get("AZURE_APPCONFIG_CONNECTION_STRING"); if (ImplUtils.isNullOrEmpty(connectionString)) { return credential; } try { return new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException | NoSuchAlgorithmException ex) { return null; } } private URL getBuildEndpoint(ConfigurationClientCredentials buildCredentials) { if (endpoint != null) { return endpoint; } else if (buildCredentials != null) { return buildCredentials.baseUri(); } else { return null; } } }
Unlike HttpClient and HttpPipeline there is no default handling for HttpLogDetailLevel in the builder. This will end up passing in a null logging level into the logging policy and that will end up throwing a NullPointerException during execution. So add this prevent a consumer from getting further into the call stack to have an error occur.
/**
 * Sets the logging level for HTTP requests and responses.
 *
 * @param logLevel The amount of logging output when sending and receiving HTTP requests/responses.
 * @return The updated ConfigurationClientBuilder object.
 * @throws NullPointerException If {@code logLevel} is {@code null}.
 */
public ConfigurationClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) {
    // Reject null eagerly; a null level would otherwise NPE later inside HttpLoggingPolicy.
    Objects.requireNonNull(logLevel);
    this.httpLogDetailLevel = logLevel;
    return this;
}
httpLogDetailLevel = Objects.requireNonNull(logLevel);
/**
 * Sets the logging level for HTTP requests and responses.
 *
 * <p>Null is rejected here rather than at build time because a null level passed into the
 * logging policy would surface as a NullPointerException during request execution.</p>
 *
 * @param logLevel The amount of logging output when sending and receiving HTTP requests/responses.
 * @return The updated ConfigurationClientBuilder object.
 * @throws NullPointerException If {@code logLevel} is {@code null}.
 */
public ConfigurationClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { httpLogDetailLevel = Objects.requireNonNull(logLevel); return this; }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private ConfigurationClientCredentials credential; private URL endpoint; private HttpClient httpClient; private HttpLogDetailLevel httpLogDetailLevel; private HttpPipeline pipeline; private RetryPolicy retryPolicy; private Configuration configuration; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { retryPolicy = new RetryPolicy(); httpLogDetailLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. * * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. 
* * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? ConfigurationManager.getConfiguration().clone() : configuration; ConfigurationClientCredentials configurationCredentials = getConfigurationCredentials(buildConfiguration); URL buildEndpoint = getBuildEndpoint(configurationCredentials); Objects.requireNonNull(buildEndpoint); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline); } ConfigurationClientCredentials buildCredential = (credential == null) ? configurationCredentials : credential; if (buildCredential == null) { logger.logAndThrow(new IllegalStateException("'credential' is required.")); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(AzureConfiguration.NAME, AzureConfiguration.VERSION, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); policies.add(new ConfigurationCredentialsPolicy(buildCredential)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogDetailLevel)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline); } /** * Sets the service endpoint for the Azure App Configuration instance. 
* * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. * @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { this.endpoint = new URL(endpoint); } catch (MalformedURLException ex) { logger.logAndThrow(new IllegalArgumentException("'endpoint' must be a valid URL")); } return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param credential The credential to use for authenticating HTTP requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder credential(ConfigurationClientCredentials credential) { this.credential = Objects.requireNonNull(credential); this.endpoint = credential.baseUri(); return this; } /** * Sets the logging level for HTTP requests and responses. * * @param logLevel The amount of logging output when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code logLevel} is {@code null}. */ /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. 
* @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } private ConfigurationClientCredentials getConfigurationCredentials(Configuration configuration) { String connectionString = configuration.get("AZURE_APPCONFIG_CONNECTION_STRING"); if (ImplUtils.isNullOrEmpty(connectionString)) { return credential; } try { return new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException | NoSuchAlgorithmException ex) { return null; } } private URL getBuildEndpoint(ConfigurationClientCredentials buildCredentials) { if (endpoint != null) { return endpoint; } else if (buildCredentials != null) { return buildCredentials.baseUri(); } else { return null; } } }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private ConfigurationClientCredentials credential; private URL endpoint; private HttpClient httpClient; private HttpLogDetailLevel httpLogDetailLevel; private HttpPipeline pipeline; private RetryPolicy retryPolicy; private Configuration configuration; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { retryPolicy = new RetryPolicy(); httpLogDetailLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. * * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. 
* * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? ConfigurationManager.getConfiguration().clone() : configuration; ConfigurationClientCredentials configurationCredentials = getConfigurationCredentials(buildConfiguration); URL buildEndpoint = getBuildEndpoint(configurationCredentials); Objects.requireNonNull(buildEndpoint); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline); } ConfigurationClientCredentials buildCredential = (credential == null) ? configurationCredentials : credential; if (buildCredential == null) { logger.logAndThrow(new IllegalStateException("'credential' is required.")); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(AzureConfiguration.NAME, AzureConfiguration.VERSION, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); policies.add(new ConfigurationCredentialsPolicy(buildCredential)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogDetailLevel)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline); } /** * Sets the service endpoint for the Azure App Configuration instance. 
* * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. * @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { this.endpoint = new URL(endpoint); } catch (MalformedURLException ex) { logger.logAndThrow(new IllegalArgumentException("'endpoint' must be a valid URL")); } return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param credential The credential to use for authenticating HTTP requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder credential(ConfigurationClientCredentials credential) { this.credential = Objects.requireNonNull(credential); this.endpoint = credential.baseUri(); return this; } /** * Sets the logging level for HTTP requests and responses. * * @param logLevel The amount of logging output when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code logLevel} is {@code null}. */ /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. 
* @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } private ConfigurationClientCredentials getConfigurationCredentials(Configuration configuration) { String connectionString = configuration.get("AZURE_APPCONFIG_CONNECTION_STRING"); if (ImplUtils.isNullOrEmpty(connectionString)) { return credential; } try { return new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException | NoSuchAlgorithmException ex) { return null; } } private URL getBuildEndpoint(ConfigurationClientCredentials buildCredentials) { if (endpoint != null) { return endpoint; } else if (buildCredentials != null) { return buildCredentials.baseUri(); } else { return null; } } }
This kind of horizontal shuffling should be reverted.
/**
 * Starts an asynchronous copy of the data at the source URL to this blob.
 *
 * @param sourceURL The source URL to copy from; URLs outside of Azure may only be copied to block blobs.
 * @param metadata Optional {@link Metadata} to associate with the destination blob; may be {@code null}.
 * @param sourceModifiedAccessConditions Optional access conditions evaluated against the source; may be {@code null}.
 * @param destAccessConditions Optional {@link BlobAccessConditions} evaluated against the destination; may be {@code null}.
 * @param context Additional context propagated through the HTTP pipeline.
 * @return A reactive response containing the copy ID for the long-running copy operation.
 */
Mono<Response<String>> startCopyFromURLWithResponse(URL sourceURL, Metadata metadata,
    ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions,
    Context context) {
    // Normalize null optionals to empty defaults so the generated layer never sees null condition objects.
    metadata = metadata == null ? new Metadata() : metadata;
    sourceModifiedAccessConditions = sourceModifiedAccessConditions == null
        ? new ModifiedAccessConditions() : sourceModifiedAccessConditions;
    destAccessConditions = destAccessConditions == null ? new BlobAccessConditions() : destAccessConditions;

    // Repackage the caller-facing source conditions into the wire type the generated client expects.
    SourceModifiedAccessConditions sourceConditions = new SourceModifiedAccessConditions()
        .sourceIfModifiedSince(sourceModifiedAccessConditions.ifModifiedSince())
        .sourceIfUnmodifiedSince(sourceModifiedAccessConditions.ifUnmodifiedSince())
        .sourceIfMatch(sourceModifiedAccessConditions.ifMatch())
        .sourceIfNoneMatch(sourceModifiedAccessConditions.ifNoneMatch());

    // NOTE(review): the generated startCopyFromURLWithRestResponseAsync overload takes three additional
    // positional parameters (passed as null here) before sourceConditions — presumably tier/rehydrate
    // options added in a newer swagger; confirm against the generated BlobsImpl signature. The previous
    // call site omitted them, shifting every subsequent argument into the wrong position.
    return postProcessResponse(this.azureBlobStorage.blobs().startCopyFromURLWithRestResponseAsync(
        null, null, sourceURL, null, metadata, null, null, null, null, sourceConditions,
        destAccessConditions.modifiedAccessConditions(), destAccessConditions.leaseAccessConditions(), context))
        .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId()));
}
.sourceIfNoneMatch(sourceModifiedAccessConditions.ifNoneMatch());
new Metadata() : metadata; sourceModifiedAccessConditions = sourceModifiedAccessConditions == null ? new ModifiedAccessConditions() : sourceModifiedAccessConditions; destAccessConditions = destAccessConditions == null ? new BlobAccessConditions() : destAccessConditions; SourceModifiedAccessConditions sourceConditions = new SourceModifiedAccessConditions() .sourceIfModifiedSince(sourceModifiedAccessConditions.ifModifiedSince()) .sourceIfUnmodifiedSince(sourceModifiedAccessConditions.ifUnmodifiedSince()) .sourceIfMatch(sourceModifiedAccessConditions.ifMatch()) .sourceIfNoneMatch(sourceModifiedAccessConditions.ifNoneMatch()); return postProcessResponse(this.azureBlobStorage.blobs().startCopyFromURLWithRestResponseAsync( null, null, sourceURL, null, metadata, null, null, null, null, sourceConditions, destAccessConditions.modifiedAccessConditions(), destAccessConditions.leaseAccessConditions(), context)) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.abortCopyFromURL
class BlobAsyncClient { private static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final int BLOB_MAX_DOWNLOAD_BLOCK_SIZE = 100 * Constants.MB; final AzureBlobStorageImpl azureBlobStorage; protected final String snapshot; /** * Package-private constructor for use by {@link BlobClientBuilder}. * * @param azureBlobStorage the API client for blob storage */ BlobAsyncClient(AzureBlobStorageImpl azureBlobStorage, String snapshot) { this.azureBlobStorage = azureBlobStorage; this.snapshot = snapshot; } /** * Creates a new {@link BlockBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be block blobs. * * @return A {@link BlockBlobAsyncClient} to this resource. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return new BlockBlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), snapshot); } /** * Creates a new {@link AppendBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be append blobs. * * @return A {@link AppendBlobAsyncClient} to this resource. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return new AppendBlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), snapshot); } /** * Creates a new {@link PageBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be page blobs. * * @return A {@link PageBlobAsyncClient} to this resource. */ public PageBlobAsyncClient asPageBlobAsyncClient() { return new PageBlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), snapshot); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code snapshot} of this blob resource. 
* * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobAsyncClient} used to interact with the specific snapshot. */ public BlobAsyncClient getSnapshotClient(String snapshot) { return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), snapshot); } /** * Initializes a {@link ContainerAsyncClient} object pointing to the container this blob is in. This method does not * create a container. It simply constructs the client to the container and offers access to methods relevant to * containers. * * @return A {@link ContainerAsyncClient} object pointing to the container containing the blob */ public ContainerAsyncClient getContainerAsyncClient() { BlobURLParts parts = URLParser.parse(getBlobUrl()); return new ContainerAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s: .pipeline(azureBlobStorage.getHttpPipeline()) .build()); } /** * Gets the URL of the blob represented by this client. * * @return the URL. * @throws RuntimeException If the blob is using a malformed URL. */ public URL getBlobUrl() { try { UrlBuilder urlBuilder = UrlBuilder.parse(azureBlobStorage.getUrl()); if (snapshot != null) { urlBuilder.query("snapshot=" + snapshot); } return urlBuilder.toURL(); } catch (MalformedURLException e) { throw new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureBlobStorage.getUrl()), e); } } /** * Determines if the blob this client represents exists in the cloud. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.exists} * * @return true if the blob exists, false if it doesn't */ public Mono<Boolean> exists() { return existsWithResponse().flatMap(FluxUtil::toMono); } /** * Determines if the blob this client represents exists in the cloud. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.existsWithResponse} * * @return true if the blob exists, false if it doesn't */ public Mono<Response<Boolean>> existsWithResponse() { return withContext(context -> existsWithResponse(context)); } Mono<Response<Boolean>> existsWithResponse(Context context) { return this.getPropertiesWithResponse(null, context) .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true)) .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> { HttpResponse response = ((StorageException) t).response(); return Mono.just(new SimpleResponse<>(response.request(), response.statusCode(), response.headers(), false)); }); } /** * Copies the data at the source URL to a blob. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.startCopyFromURL * * <p>For more information, see the * <a href="https: * * @param sourceURL The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @return A reactive response containing the copy ID for the long running operation. */ public Mono<String> startCopyFromURL(URL sourceURL) { return startCopyFromURLWithResponse(sourceURL, null, null, null).flatMap(FluxUtil::toMono); } /** * Copies the data at the source URL to a blob. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.startCopyFromURLWithResponse * * <p>For more information, see the * <a href="https: * * @param sourceURL The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata {@link Metadata} * @param sourceModifiedAccessConditions {@link ModifiedAccessConditions} against the source. Standard HTTP Access * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. 
The request will fail if the specified * condition is not satisfied. * @param destAccessConditions {@link BlobAccessConditions} against the destination. * @return A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> startCopyFromURLWithResponse(URL sourceURL, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return withContext(context -> startCopyFromURLWithResponse(sourceURL, metadata, sourceModifiedAccessConditions, destAccessConditions, context)); } Mono<Response<String>> startCopyFromURLWithResponse(URL sourceURL, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, Context context) { metadata = metadata == null ? * * <p>For more information, see the * <a href="https: * * @param copyId The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * @return A reactive response signalling completion. */ public Mono<Void> abortCopyFromURL(String copyId) { return abortCopyFromURLWithResponse(copyId, null).flatMap(FluxUtil::toMono); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.abortCopyFromURLWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does * not match the active lease on the blob. * @return A reactive response signalling completion. 
*/ public Mono<VoidResponse> abortCopyFromURLWithResponse(String copyId, LeaseAccessConditions leaseAccessConditions) { return withContext(context -> abortCopyFromURLWithResponse(copyId, leaseAccessConditions, context)); } Mono<VoidResponse> abortCopyFromURLWithResponse(String copyId, LeaseAccessConditions leaseAccessConditions, Context context) { return postProcessResponse(this.azureBlobStorage.blobs().abortCopyFromURLWithRestResponseAsync( null, null, copyId, null, null, leaseAccessConditions, context)) .map(VoidResponse::new); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.copyFromURL * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. * @return A reactive response containing the copy ID for the long running operation. */ public Mono<String> copyFromURL(URL copySource) { return copyFromURLWithResponse(copySource, null, null, null).flatMap(FluxUtil::toMono); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.copyFromURLWithResponse * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata {@link Metadata} * @param sourceModifiedAccessConditions {@link ModifiedAccessConditions} against the source. Standard HTTP Access * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. The request will fail if the specified * condition is not satisfied. * @param destAccessConditions {@link BlobAccessConditions} against the destination. 
* @return A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromURLWithResponse(URL copySource, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return withContext(context -> copyFromURLWithResponse(copySource, metadata, sourceModifiedAccessConditions, destAccessConditions, context)); } Mono<Response<String>> copyFromURLWithResponse(URL copySource, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, Context context) { metadata = metadata == null ? new Metadata() : metadata; sourceModifiedAccessConditions = sourceModifiedAccessConditions == null ? new ModifiedAccessConditions() : sourceModifiedAccessConditions; destAccessConditions = destAccessConditions == null ? new BlobAccessConditions() : destAccessConditions; SourceModifiedAccessConditions sourceConditions = new SourceModifiedAccessConditions() .sourceIfModifiedSince(sourceModifiedAccessConditions.ifModifiedSince()) .sourceIfUnmodifiedSince(sourceModifiedAccessConditions.ifUnmodifiedSince()) .sourceIfMatch(sourceModifiedAccessConditions.ifMatch()) .sourceIfNoneMatch(sourceModifiedAccessConditions.ifNoneMatch()); return postProcessResponse(this.azureBlobStorage.blobs().copyFromURLWithRestResponseAsync( null, null, copySource, null, metadata, null, sourceConditions, destAccessConditions.modifiedAccessConditions(), destAccessConditions.leaseAccessConditions(), context)) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or * {@link AppendBlobClient}. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.download} * * <p>For more information, see the * <a href="https: * * @return A reactive response containing the blob data. 
*/
public Mono<Flux<ByteBuffer>> download() {
    return downloadWithResponse(null, null, null, false).flatMap(FluxUtil::toMono);
}

/**
 * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.downloadWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param range {@link BlobRange}
 * @param options {@link ReliableDownloadOptions}
 * @param accessConditions {@link BlobAccessConditions}
 * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned.
 * @return A reactive response containing the blob data.
 */
public Mono<Response<Flux<ByteBuffer>>> downloadWithResponse(BlobRange range, ReliableDownloadOptions options,
    BlobAccessConditions accessConditions, boolean rangeGetContentMD5) {
    return withContext(context ->
        downloadWithResponse(range, options, accessConditions, rangeGetContentMD5, context));
}

Mono<Response<Flux<ByteBuffer>>> downloadWithResponse(BlobRange range, ReliableDownloadOptions options,
    BlobAccessConditions accessConditions, boolean rangeGetContentMD5, Context context) {
    // An empty body is normalized to a single empty buffer so downstream operators always receive an element.
    return download(range, accessConditions, rangeGetContentMD5, context)
        .map(response -> new SimpleResponse<>(
            response.rawResponse(),
            response.body(options).map(ByteBuf::nioBuffer).switchIfEmpty(Flux.just(ByteBuffer.allocate(0)))));
}

/**
 * Reads a range of bytes from a blob. The response also includes the blob's properties and metadata. For more
 * information, see the Azure Docs.
 * <p>
 * Note that the response body has reliable download functionality built in, meaning that a failed download stream
 * will be automatically retried. This behavior may be configured with {@link ReliableDownloadOptions}.
 *
 * @param range {@link BlobRange}
 * @param accessConditions {@link BlobAccessConditions}
 * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned.
 * @return Emits the successful response.
 */
Mono<DownloadAsyncResponse> download(BlobRange range, BlobAccessConditions accessConditions,
    boolean rangeGetContentMD5) {
    return withContext(context -> download(range, accessConditions, rangeGetContentMD5, context));
}

Mono<DownloadAsyncResponse> download(BlobRange range, BlobAccessConditions accessConditions,
    boolean rangeGetContentMD5, Context context) {
    range = range == null ? new BlobRange(0) : range;
    // false is mapped to null — presumably so the range-get-content-md5 header is omitted rather than sent
    // as "false"; confirm against the generated downloadWithRestResponseAsync contract.
    Boolean getMD5 = rangeGetContentMD5 ? rangeGetContentMD5 : null;
    accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions;
    // Captures the parameters needed to resume a failed download from the point it stopped.
    HTTPGetterInfo info = new HTTPGetterInfo()
        .offset(range.offset())
        .count(range.count())
        .eTag(accessConditions.modifiedAccessConditions().ifMatch());
    return postProcessResponse(this.azureBlobStorage.blobs().downloadWithRestResponseAsync(
        null, null, snapshot, null, null, range.toHeaderValue(), getMD5, null, null, null, null,
        accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context))
        .map(response -> {
            // Lock onto the eTag of the first response so a retried read fails (rather than silently
            // mixing data) if the blob is modified mid-download.
            info.eTag(response.deserializedHeaders().eTag());
            return new DownloadAsyncResponse(response, info,
                // Resume function: re-issues the download for the remaining range under the captured eTag.
                newInfo -> this.download(new BlobRange(newInfo.offset(), newInfo.count()),
                    new BlobAccessConditions().modifiedAccessConditions(
                        new ModifiedAccessConditions().ifMatch(info.eTag())), false, context));
        });
}

/**
 * Downloads the entire blob into a file specified by the path. The file will be created if it doesn't exist.
 * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or
 * {@link AppendBlobClient}.
 * <p>
 * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call,
 * use the other overload providing the {@link BlobRange} parameter.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.downloadToFile}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A non-null {@code String} path to the file the downloaded data will be written to.
 * @return An empty response
 */
public Mono<Void> downloadToFile(String filePath) {
    return downloadToFile(filePath, null, BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE, null, null, false);
}

/**
 * Downloads a range of bytes blob into a file specified by the path. The file will be created if it doesn't
 * exist. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or
 * {@link AppendBlobClient}.
 * <p>
 * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call,
 * provide the {@link BlobRange} parameter.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.downloadToFile}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A non-null {@code String} path to the file the downloaded data will be written to.
 * @param range {@link BlobRange}
 * @param blockSize the size of a chunk to download at a time, in bytes
 * @param options {@link ReliableDownloadOptions}
 * @param accessConditions {@link BlobAccessConditions}
 * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned.
 * @return An empty response
 * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB.
 * @throws UncheckedIOException If an I/O error occurs.
*/
public Mono<Void> downloadToFile(String filePath, BlobRange range, Integer blockSize,
    ReliableDownloadOptions options, BlobAccessConditions accessConditions, boolean rangeGetContentMD5) {
    return withContext(context ->
        downloadToFile(filePath, range, blockSize, options, accessConditions, rangeGetContentMD5, context));
}

Mono<Void> downloadToFile(String filePath, BlobRange range, Integer blockSize, ReliableDownloadOptions options,
    BlobAccessConditions accessConditions, boolean rangeGetContentMD5, Context context) {
    // blockSize may legitimately be null (sliceBlobRange defaults it); guard before unboxing — the
    // original bare "blockSize < 0" comparison threw NullPointerException for a null blockSize.
    if (blockSize != null && (blockSize < 0 || blockSize > BLOB_MAX_DOWNLOAD_BLOCK_SIZE)) {
        throw new IllegalArgumentException("Block size should not exceed 100MB");
    }
    // Mono.using ties the channel's lifetime to the subscription: opened per subscriber, closed on
    // completion, error, or cancellation.
    return Mono.using(() -> downloadToFileResourceSupplier(filePath),
        channel -> Mono.justOrEmpty(range)
            // No explicit range -> fetch the blob's properties once to download the whole blob.
            .switchIfEmpty(getFullBlobRange(accessConditions))
            .flatMapMany(rg -> Flux.fromIterable(sliceBlobRange(rg, blockSize)))
            .flatMap(chunk -> this.download(chunk, accessConditions, rangeGetContentMD5, context)
                .subscribeOn(Schedulers.elastic())
                // Each chunk is written at its offset relative to the start of the requested range.
                .flatMap(dar -> FluxUtil.bytebufStreamToFile(dar.body(options), channel,
                    chunk.offset() - (range == null ? 0 : range.offset()))))
            .then(),
        this::downloadToFileCleanup);
}

/**
 * Opens the destination file for read/write.
 * StandardOpenOption.CREATE added so the documented contract ("the file will be created if it doesn't
 * exist") actually holds; previously a missing file caused a NoSuchFileException.
 *
 * @throws UncheckedIOException If the channel cannot be opened.
 */
private AsynchronousFileChannel downloadToFileResourceSupplier(String filePath) {
    try {
        return AsynchronousFileChannel.open(Paths.get(filePath),
            StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE);
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}

// Closes the channel, converting the checked IOException for use as a Mono.using cleanup callback.
private void downloadToFileCleanup(AsynchronousFileChannel channel) {
    try {
        channel.close();
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}

// Resolves the full [0, blobSize) range by fetching the blob's properties.
private Mono<BlobRange> getFullBlobRange(BlobAccessConditions accessConditions) {
    return getPropertiesWithResponse(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize()));
}

/**
 * Splits {@code blobRange} into consecutive sub-ranges of at most {@code blockSize} bytes; the final
 * chunk is truncated to the range's end. A null {@code blockSize} falls back to the default.
 */
private List<BlobRange> sliceBlobRange(BlobRange blobRange, Integer blockSize) {
    if (blockSize == null) {
        blockSize = BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE;
    }
    long offset = blobRange.offset();
    long length = blobRange.count();
    List<BlobRange> chunks = new ArrayList<>();
    for (long pos = offset; pos < offset + length; pos += blockSize) {
        long count = blockSize;
        if (pos + count > offset + length) {
            count = offset + length - pos;
        }
        chunks.add(new BlobRange(pos, count));
    }
    return chunks;
}

/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.delete}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return A reactive response signalling completion.
 */
public Mono<Void> delete() {
    return deleteWithResponse(null, null).flatMap(FluxUtil::toMono);
}

/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob.
 * {@code Include} will delete the base blob and all snapshots. {@code Only} will delete only the snapshots.
 * If a snapshot is being deleted, you must pass null.
 * @param accessConditions {@link BlobAccessConditions}
 * @return A reactive response signalling completion.
 */
public Mono<VoidResponse> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobAccessConditions accessConditions) {
    return withContext(context -> deleteWithResponse(deleteBlobSnapshotOptions, accessConditions, context));
}

Mono<VoidResponse> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobAccessConditions accessConditions, Context context) {
    accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions;
    return postProcessResponse(this.azureBlobStorage.blobs().deleteWithRestResponseAsync(
        null, null, snapshot, null, null, deleteBlobSnapshotOptions, null,
        accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context))
        .map(VoidResponse::new);
}

/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.getProperties}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return A reactive response containing the blob properties and metadata.
 */
public Mono<BlobProperties> getProperties() {
    return getPropertiesWithResponse(null).flatMap(FluxUtil::toMono);
}

/**
 * Returns the blob's metadata and properties.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param accessConditions {@link BlobAccessConditions}
 * @return A reactive response containing the blob properties and metadata.
 */
public Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobAccessConditions accessConditions) {
    return withContext(context -> getPropertiesWithResponse(accessConditions, context));
}

Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobAccessConditions accessConditions, Context context) {
    accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions;
    return postProcessResponse(this.azureBlobStorage.blobs().getPropertiesWithRestResponseAsync(
        null, null, snapshot, null, null, null, null, null, null,
        accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context))
        .map(rb -> new SimpleResponse<>(rb, new BlobProperties(rb.deserializedHeaders())));
}

/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.setHTTPHeaders}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param headers {@link BlobHTTPHeaders}
 * @return A reactive response signalling completion.
 */
public Mono<Void> setHTTPHeaders(BlobHTTPHeaders headers) {
    return setHTTPHeadersWithResponse(headers, null).flatMap(FluxUtil::toMono);
}

/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param headers {@link BlobHTTPHeaders}
 * @param accessConditions {@link BlobAccessConditions}
 * @return A reactive response signalling completion.
 */
public Mono<VoidResponse> setHTTPHeadersWithResponse(BlobHTTPHeaders headers,
    BlobAccessConditions accessConditions) {
    return withContext(context -> setHTTPHeadersWithResponse(headers, accessConditions, context));
}

Mono<VoidResponse> setHTTPHeadersWithResponse(BlobHTTPHeaders headers, BlobAccessConditions accessConditions,
    Context context) {
    accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions;
    return postProcessResponse(this.azureBlobStorage.blobs().setHTTPHeadersWithRestResponseAsync(
        null, null, null, null, headers,
        accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context))
        .map(VoidResponse::new);
}

/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.setMetadata}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param metadata {@link Metadata}
 * @return A reactive response signalling completion.
 */
public Mono<Void> setMetadata(Metadata metadata) {
    return setMetadataWithResponse(metadata, null).flatMap(FluxUtil::toMono);
}

/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param metadata {@link Metadata}
 * @param accessConditions {@link BlobAccessConditions}
 * @return A reactive response signalling completion.
 */
public Mono<VoidResponse> setMetadataWithResponse(Metadata metadata, BlobAccessConditions accessConditions) {
    return withContext(context -> setMetadataWithResponse(metadata, accessConditions, context));
}

Mono<VoidResponse> setMetadataWithResponse(Metadata metadata, BlobAccessConditions accessConditions,
    Context context) {
    metadata = metadata == null ? new Metadata() : metadata;
    accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions;
    return postProcessResponse(this.azureBlobStorage.blobs().setMetadataWithRestResponseAsync(
        null, null, null, metadata, null, null, null, null,
        accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context))
        .map(VoidResponse::new);
}

/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.createSnapshot}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return A response containing a {@link BlobAsyncClient} which is used to interact with the created snapshot.
 */
public Mono<BlobAsyncClient> createSnapshot() {
    return createSnapshotWithResponse(null, null).flatMap(FluxUtil::toMono);
}

/**
 * Creates a read-only snapshot of the blob.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param metadata {@link Metadata}
 * @param accessConditions {@link BlobAccessConditions}
 * @return A response containing a {@link BlobAsyncClient} which is used to interact with the created snapshot.
 */
public Mono<Response<BlobAsyncClient>> createSnapshotWithResponse(Metadata metadata,
    BlobAccessConditions accessConditions) {
    return withContext(context -> createSnapshotWithResponse(metadata, accessConditions, context));
}

Mono<Response<BlobAsyncClient>> createSnapshotWithResponse(Metadata metadata,
    BlobAccessConditions accessConditions, Context context) {
    metadata = metadata == null ? new Metadata() : metadata;
    accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions;
    // NOTE(review): arguments are passed (modified, lease) — the reverse of the other operations in this
    // class; this matches the call as written, but confirm against the generated createSnapshot signature.
    return postProcessResponse(this.azureBlobStorage.blobs().createSnapshotWithRestResponseAsync(
        null, null, null, metadata, null, null, null, null,
        accessConditions.modifiedAccessConditions(), accessConditions.leaseAccessConditions(), context))
        .map(rb -> new SimpleResponse<>(rb, this.getSnapshotClient(rb.deserializedHeaders().snapshot())));
}

/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob
 * in a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth
 * of the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param tier The new tier for the blob.
 * @return A reactive response signalling completion.
*/ public Mono<Void> setTier(AccessTier tier) { return setTierWithResponse(tier, null).flatMap(FluxUtil::toMono); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.setTierWithResponse * * <p>For more information, see the * <a href="https: * * @param tier The new tier for the blob. * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does * not match the active lease on the blob. * @return A reactive response signalling completion. */ public Mono<VoidResponse> setTierWithResponse(AccessTier tier, LeaseAccessConditions leaseAccessConditions) { return withContext(context -> setTierWithResponse(tier, leaseAccessConditions, context)); } Mono<VoidResponse> setTierWithResponse(AccessTier tier, LeaseAccessConditions leaseAccessConditions, Context context) { Utility.assertNotNull("tier", tier); return postProcessResponse(this.azureBlobStorage.blobs().setTierWithRestResponseAsync( null, null, tier, null, null, leaseAccessConditions, context)) .map(VoidResponse::new); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.undelete} * * <p>For more information, see the * <a href="https: * * @return A reactive response signalling completion. */ public Mono<Void> undelete() { return undeleteWithResponse().flatMap(FluxUtil::toMono); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. 
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.undeleteWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return A reactive response signalling completion.
 */
public Mono<VoidResponse> undeleteWithResponse() {
    return withContext(context -> undeleteWithResponse(context));
}

Mono<VoidResponse> undeleteWithResponse(Context context) {
    return postProcessResponse(this.azureBlobStorage.blobs().undeleteWithRestResponseAsync(null, null, context))
        .map(VoidResponse::new);
}

/**
 * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60
 * seconds, or infinite (-1).
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param proposedId A {@code String} in any valid GUID format. May be null.
 * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A
 * non-infinite lease can be between 15 and 60 seconds.
 * @return A reactive response containing the lease ID.
 */
public Mono<String> acquireLease(String proposedId, int duration) {
    return acquireLeaseWithResponse(proposedId, duration, null).flatMap(FluxUtil::toMono);
}

/**
 * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60
 * seconds, or infinite (-1).
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param proposedId A {@code String} in any valid GUID format. May be null.
 * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A
 * non-infinite lease can be between 15 and 60 seconds.
 * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and
 * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given
 * request. The request will fail if the specified condition is not satisfied.
 * @return A reactive response containing the lease ID.
 * @throws IllegalArgumentException If {@code duration} is outside the bounds of 15 to 60 or isn't -1.
 */
public Mono<Response<String>> acquireLeaseWithResponse(String proposedId, int duration,
    ModifiedAccessConditions modifiedAccessConditions) {
    return withContext(context ->
        acquireLeaseWithResponse(proposedId, duration, modifiedAccessConditions, context));
}

Mono<Response<String>> acquireLeaseWithResponse(String proposedId, int duration,
    ModifiedAccessConditions modifiedAccessConditions, Context context) {
    // Eager argument validation: thrown synchronously rather than surfaced through the Mono.
    if (!(duration == -1 || (duration >= 15 && duration <= 60))) {
        throw new IllegalArgumentException("Duration must be -1 or between 15 and 60.");
    }
    return postProcessResponse(this.azureBlobStorage.blobs().acquireLeaseWithRestResponseAsync(
        null, null, null, duration, proposedId, null, modifiedAccessConditions, context))
        .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId()));
}

/**
 * Renews the blob's previously-acquired lease.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param leaseId The leaseId of the active lease on the blob.
 * @return A reactive response containing the renewed lease ID.
 */
public Mono<String> renewLease(String leaseId) {
    return renewLeaseWithResponse(leaseId, null).flatMap(FluxUtil::toMono);
}

/**
 * Renews the blob's previously-acquired lease.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param leaseId The leaseId of the active lease on the blob.
 * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and
 * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given
 * request. The request will fail if the specified condition is not satisfied.
 * @return A reactive response containing the renewed lease ID.
 */
public Mono<Response<String>> renewLeaseWithResponse(String leaseId,
    ModifiedAccessConditions modifiedAccessConditions) {
    return withContext(context -> renewLeaseWithResponse(leaseId, modifiedAccessConditions, context));
}

Mono<Response<String>> renewLeaseWithResponse(String leaseId, ModifiedAccessConditions modifiedAccessConditions,
    Context context) {
    return postProcessResponse(this.azureBlobStorage.blobs().renewLeaseWithRestResponseAsync(null, null, leaseId,
        null, null, modifiedAccessConditions, context))
        .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId()));
}

/**
 * Releases the blob's previously-acquired lease.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param leaseId The leaseId of the active lease on the blob.
 * @return A reactive response signalling completion.
 */
public Mono<Void> releaseLease(String leaseId) {
    return releaseLeaseWithResponse(leaseId, null).flatMap(FluxUtil::toMono);
}

/**
 * Releases the blob's previously-acquired lease.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param leaseId The leaseId of the active lease on the blob.
 * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and
 * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given
 * request.
 * The request will fail if the specified condition is not satisfied.
 * @return A reactive response signalling completion.
 */
public Mono<VoidResponse> releaseLeaseWithResponse(String leaseId,
    ModifiedAccessConditions modifiedAccessConditions) {
    return withContext(context -> releaseLeaseWithResponse(leaseId, modifiedAccessConditions, context));
}

Mono<VoidResponse> releaseLeaseWithResponse(String leaseId, ModifiedAccessConditions modifiedAccessConditions,
    Context context) {
    return postProcessResponse(this.azureBlobStorage.blobs().releaseLeaseWithRestResponseAsync(null, null,
        leaseId, null, null, modifiedAccessConditions, context))
        .map(VoidResponse::new);
}

/**
 * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant
 * to break a fixed-duration lease when it expires or an infinite lease immediately.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.breakLease}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return A reactive response containing the remaining time in the broken lease in seconds.
 */
public Mono<Integer> breakLease() {
    return breakLeaseWithResponse(null, null).flatMap(FluxUtil::toMono);
}

/**
 * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant
 * to break a fixed-duration lease when it expires or an infinite lease immediately.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param breakPeriodInSeconds An optional {@code Integer} representing the proposed duration of seconds that the
 * lease should continue before it is broken, between 0 and 60 seconds. This break period is only used if it is
 * shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease
 * will not be available before the break period has expired, but the lease may be held for longer than the break
 * period.
 * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and
 * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given
 * request. The request will fail if the specified condition is not satisfied.
 * @return A reactive response containing the remaining time in the broken lease in seconds.
 */
public Mono<Response<Integer>> breakLeaseWithResponse(Integer breakPeriodInSeconds,
    ModifiedAccessConditions modifiedAccessConditions) {
    return withContext(context ->
        breakLeaseWithResponse(breakPeriodInSeconds, modifiedAccessConditions, context));
}

Mono<Response<Integer>> breakLeaseWithResponse(Integer breakPeriodInSeconds,
    ModifiedAccessConditions modifiedAccessConditions, Context context) {
    return postProcessResponse(this.azureBlobStorage.blobs().breakLeaseWithRestResponseAsync(null, null, null,
        breakPeriodInSeconds, null, modifiedAccessConditions, context))
        .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseTime()));
}

/**
 * ChangeLease changes the blob's lease ID.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.changeLease}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param leaseId The leaseId of the active lease on the blob.
 * @param proposedId A {@code String} in any valid GUID format.
 * @return A reactive response containing the new lease ID.
 */
public Mono<String> changeLease(String leaseId, String proposedId) {
    return changeLeaseWithResponse(leaseId, proposedId, null).flatMap(FluxUtil::toMono);
}

/**
 * ChangeLease changes the blob's lease ID.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param leaseId The leaseId of the active lease on the blob.
 * @param proposedId A {@code String} in any valid GUID format.
 * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and
 * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given
 * request. The request will fail if the specified condition is not satisfied.
 * @return A reactive response containing the new lease ID.
 */
public Mono<Response<String>> changeLeaseWithResponse(String leaseId, String proposedId,
    ModifiedAccessConditions modifiedAccessConditions) {
    return withContext(context ->
        changeLeaseWithResponse(leaseId, proposedId, modifiedAccessConditions, context));
}

Mono<Response<String>> changeLeaseWithResponse(String leaseId, String proposedId,
    ModifiedAccessConditions modifiedAccessConditions, Context context) {
    return postProcessResponse(this.azureBlobStorage.blobs().changeLeaseWithRestResponseAsync(null, null,
        leaseId, proposedId, null, null, modifiedAccessConditions, context))
        .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId()));
}

/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.getAccountInfo}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return a reactor response containing the sku name and account kind.
 */
public Mono<StorageAccountInfo> getAccountInfo() {
    return getAccountInfoWithResponse().flatMap(FluxUtil::toMono);
}

/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.getAccountInfo}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return a reactor response containing the sku name and account kind.
 */
public Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse() {
    return withContext(context -> getAccountInfoWithResponse(context));
}

Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse(Context context) {
    return postProcessResponse(
        this.azureBlobStorage.blobs().getAccountInfoWithRestResponseAsync(null, null, context))
        .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders())));
}

/**
 * Generates a user delegation SAS with the specified parameters.
 *
 * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS
 * @param accountName The {@code String} account name for the SAS
 * @param permissions The {@code BlobSASPermission} permission for the SAS
 * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS
 * @return A string that represents the SAS token
 */
public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName,
    BlobSASPermission permissions, OffsetDateTime expiryTime) {
    return this.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime,
        null /* startTime */, null /* version */, null /* sasProtocol */, null /* ipRange */,
        null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */,
        null /* contentLanguage */, null /* contentType */);
}

/**
 * Generates a user delegation SAS token with the specified parameters.
 *
 * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS
 * @param accountName The {@code String} account name for the SAS
 * @param permissions The {@code BlobSASPermission} permission for the SAS
 * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS
 * @param startTime An optional {@code OffsetDateTime} start time for the SAS
 * @param version An optional {@code String} version for the SAS
 * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS
 * @param ipRange An optional {@code IPRange} ip address range for the SAS
 * @return A string that represents the SAS token
 */
public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName,
    BlobSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version,
    SASProtocol sasProtocol, IPRange ipRange) {
    return this.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, startTime,
        version, sasProtocol, ipRange, null /* cacheControl */, null /* contentDisposition */,
        null /* contentEncoding */, null /* contentLanguage */, null /* contentType */);
}

/**
 * Generates a user delegation SAS token with the specified parameters.
 *
 * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS
 * @param accountName The {@code String} account name for the SAS
 * @param permissions The {@code BlobSASPermission} permission for the SAS
 * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS
 * @param startTime An optional {@code OffsetDateTime} start time for the SAS
 * @param version An optional {@code String} version for the SAS
 * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS
 * @param ipRange An optional {@code IPRange} ip address range for the SAS
 * @param cacheControl An optional {@code String} cache-control header for the SAS.
 * @param contentDisposition An optional {@code String} content-disposition header for the SAS.
 * @param contentEncoding An optional {@code String} content-encoding header for the SAS.
 * @param contentLanguage An optional {@code String} content-language header for the SAS.
 * @param contentType An optional {@code String} content-type header for the SAS.
 * @return A string that represents the SAS token
 */
public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName,
    BlobSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version,
    SASProtocol sasProtocol, IPRange ipRange, String cacheControl, String contentDisposition,
    String contentEncoding, String contentLanguage, String contentType) {
    ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol,
        startTime, expiryTime, permissions == null ? null : permissions.toString(), ipRange,
        null /* identifier */, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType);
    ServiceSASSignatureValues values = configureServiceSASSignatureValues(serviceSASSignatureValues, accountName);
    SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(userDelegationKey);
    return sasQueryParameters.encode();
}

/**
 * Generates a SAS token with the specified parameters.
 *
 * @param permissions The {@code BlobSASPermission} permission for the SAS
 * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS
 * @return A string that represents the SAS token
 */
public String generateSAS(BlobSASPermission permissions, OffsetDateTime expiryTime) {
    return this.generateSAS(null /* identifier */, permissions, expiryTime, null /* startTime */,
        null /* version */, null /* sasProtocol */, null /* ipRange */, null /* cacheControl */,
        null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */,
        null /* contentType */);
}

/**
 * Generates a SAS token with the specified parameters.
 *
 * @param identifier The {@code String} name of the access policy on the container this SAS references if any
 * @return A string that represents the SAS token
 */
public String generateSAS(String identifier) {
    return this.generateSAS(identifier, null /* permissions */, null /* expiryTime */, null /* startTime */,
        null /* version */, null /* sasProtocol */, null /* ipRange */, null /* cacheControl */,
        null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */,
        null /* contentType */);
}

/**
 * Generates a SAS token with the specified parameters.
 *
 * @param identifier The {@code String} name of the access policy on the container this SAS references if any
 * @param permissions The {@code BlobSASPermission} permission for the SAS
 * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS
 * @param startTime An optional {@code OffsetDateTime} start time for the SAS
 * @param version An optional {@code String} version for the SAS
 * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS
 * @param ipRange An optional {@code IPRange} ip address range for the SAS
 * @return A string that represents the SAS token
 */
public String generateSAS(String identifier, BlobSASPermission permissions, OffsetDateTime expiryTime,
    OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange) {
    return this.generateSAS(identifier, permissions, expiryTime, startTime, version, sasProtocol, ipRange,
        null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */,
        null /* contentLanguage */, null /* contentType */);
}

/**
 * Generates a SAS token with the specified parameters.
 *
 * @param identifier The {@code String} name of the access policy on the container this SAS references if any
 * @param permissions The {@code BlobSASPermission} permission for the SAS
 * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS
 * @param startTime An optional {@code OffsetDateTime} start time for the SAS
 * @param version An optional {@code String} version for the SAS
 * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS
 * @param ipRange An optional {@code IPRange} ip address range for the SAS
 * @param cacheControl An optional {@code String} cache-control header for the SAS.
 * @param contentDisposition An optional {@code String} content-disposition header for the SAS.
 * @param contentEncoding An optional {@code String} content-encoding header for the SAS.
 * @param contentLanguage An optional {@code String} content-language header for the SAS.
 * @param contentType An optional {@code String} content-type header for the SAS.
 * @return A string that represents the SAS token
 */
public String generateSAS(String identifier, BlobSASPermission permissions, OffsetDateTime expiryTime,
    OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange, String cacheControl,
    String contentDisposition, String contentEncoding, String contentLanguage, String contentType) {
    ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol,
        startTime, expiryTime, permissions == null ? null : permissions.toString(), ipRange, identifier,
        cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType);
    // Account-key flavored SAS requires a SharedKeyCredential on the pipeline.
    SharedKeyCredential sharedKeyCredential =
        Utility.getSharedKeyCredential(this.azureBlobStorage.getHttpPipeline());
    Utility.assertNotNull("sharedKeyCredential", sharedKeyCredential);
    ServiceSASSignatureValues values =
        configureServiceSASSignatureValues(serviceSASSignatureValues, sharedKeyCredential.accountName());
    SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(sharedKeyCredential);
    return sasQueryParameters.encode();
}

/**
 * Sets serviceSASSignatureValues parameters dependent on the current blob type.
 */
ServiceSASSignatureValues configureServiceSASSignatureValues(ServiceSASSignatureValues serviceSASSignatureValues,
    String accountName) {
    serviceSASSignatureValues.canonicalName(this.azureBlobStorage.getUrl(), accountName);
    serviceSASSignatureValues.snapshotId(getSnapshotId());
    if (isSnapshot()) {
        serviceSASSignatureValues.resource(Constants.UrlConstants.SAS_BLOB_SNAPSHOT_CONSTANT);
    } else {
serviceSASSignatureValues.resource(Constants.UrlConstants.SAS_BLOB_CONSTANT); } return serviceSASSignatureValues; } /** * Gets the snapshotId for a blob resource * * @return A string that represents the snapshotId of the snapshot blob */ public String getSnapshotId() { return this.snapshot; } /** * Determines if a blob is a snapshot * * @return A boolean that indicates if a blob is a snapshot */ public boolean isSnapshot() { return this.snapshot != null; } }
class BlobAsyncClient { private static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final int BLOB_MAX_DOWNLOAD_BLOCK_SIZE = 100 * Constants.MB; final AzureBlobStorageImpl azureBlobStorage; protected final String snapshot; /** * Package-private constructor for use by {@link BlobClientBuilder}. * * @param azureBlobStorage the API client for blob storage */ BlobAsyncClient(AzureBlobStorageImpl azureBlobStorage, String snapshot) { this.azureBlobStorage = azureBlobStorage; this.snapshot = snapshot; } /** * Creates a new {@link BlockBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be block blobs. * * @return A {@link BlockBlobAsyncClient} to this resource. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return new BlockBlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), snapshot); } /** * Creates a new {@link AppendBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be append blobs. * * @return A {@link AppendBlobAsyncClient} to this resource. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return new AppendBlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), snapshot); } /** * Creates a new {@link PageBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be page blobs. * * @return A {@link PageBlobAsyncClient} to this resource. */ public PageBlobAsyncClient asPageBlobAsyncClient() { return new PageBlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), snapshot); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code snapshot} of this blob resource. 
* * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobAsyncClient} used to interact with the specific snapshot. */ public BlobAsyncClient getSnapshotClient(String snapshot) { return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), snapshot); } /** * Initializes a {@link ContainerAsyncClient} object pointing to the container this blob is in. This method does not * create a container. It simply constructs the client to the container and offers access to methods relevant to * containers. * * @return A {@link ContainerAsyncClient} object pointing to the container containing the blob */ public ContainerAsyncClient getContainerAsyncClient() { BlobURLParts parts = URLParser.parse(getBlobUrl()); return new ContainerAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s: .pipeline(azureBlobStorage.getHttpPipeline()) .build()); } /** * Gets the URL of the blob represented by this client. * * @return the URL. * @throws RuntimeException If the blob is using a malformed URL. */ public URL getBlobUrl() { try { UrlBuilder urlBuilder = UrlBuilder.parse(azureBlobStorage.getUrl()); if (snapshot != null) { urlBuilder.query("snapshot=" + snapshot); } return urlBuilder.toURL(); } catch (MalformedURLException e) { throw new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureBlobStorage.getUrl()), e); } } /** * Determines if the blob this client represents exists in the cloud. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.exists} * * @return true if the blob exists, false if it doesn't */ public Mono<Boolean> exists() { return existsWithResponse().flatMap(FluxUtil::toMono); } /** * Determines if the blob this client represents exists in the cloud. 
*
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.existsWithResponse}
 *
 * @return true if the blob exists, false if it doesn't
 */
public Mono<Response<Boolean>> existsWithResponse() {
    return withContext(context -> existsWithResponse(context));
}

// Existence is probed with a properties call; a 404 StorageException is translated into a
// successful Response carrying false rather than an error.
Mono<Response<Boolean>> existsWithResponse(Context context) {
    return this.getPropertiesWithResponse(null, context)
        .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true))
        .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> {
            HttpResponse response = ((StorageException) t).response();
            return Mono.just(new SimpleResponse<>(response.request(), response.statusCode(),
                response.headers(), false));
        });
}

/**
 * Copies the data at the source URL to a blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.startCopyFromURL}
 *
 * <p>For more information, see the Azure Docs (Copy Blob REST operation).</p>
 *
 * @param sourceURL The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @return A reactive response containing the copy ID for the long running operation.
 */
public Mono<String> startCopyFromURL(URL sourceURL) {
    return startCopyFromURLWithResponse(sourceURL, null, null, null).flatMap(FluxUtil::toMono);
}

/**
 * Copies the data at the source URL to a blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.startCopyFromURLWithResponse}
 *
 * <p>For more information, see the Azure Docs (Copy Blob REST operation).</p>
 *
 * @param sourceURL The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata {@link Metadata}
 * @param sourceModifiedAccessConditions {@link ModifiedAccessConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destAccessConditions {@link BlobAccessConditions} against the destination.
 * @return A reactive response containing the copy ID for the long running operation.
 */
public Mono<Response<String>> startCopyFromURLWithResponse(URL sourceURL, Metadata metadata,
        ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) {
    return withContext(context -> startCopyFromURLWithResponse(sourceURL, metadata,
        sourceModifiedAccessConditions, destAccessConditions, context));
}

Mono<Response<String>> startCopyFromURLWithResponse(URL sourceURL, Metadata metadata,
        ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions,
        Context context) {
    metadata = metadata == null ?
    // NOTE(review): the source is truncated here — the remainder of this method body (null-defaulting,
    // source-condition mapping, and the REST call) is missing from this chunk. Compare with
    // copyFromURLWithResponse below for the likely shape. TODO: restore from the original file.

/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p>For more information, see the Azure Docs (Abort Copy Blob REST operation).</p>
 *
 * @param copyId The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link
 * BlobStartCopyFromURLHeaders} object.
 * @return A reactive response signalling completion.
 */
public Mono<Void> abortCopyFromURL(String copyId) {
    return abortCopyFromURLWithResponse(copyId, null).flatMap(FluxUtil::toMono);
}

/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.abortCopyFromURLWithResponse}
 *
 * <p>For more information, see the Azure Docs (Abort Copy Blob REST operation).</p>
 *
 * @param copyId The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link
 * BlobStartCopyFromURLHeaders} object.
 * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does
 * not match the active lease on the blob.
 * @return A reactive response signalling completion.
*/
public Mono<VoidResponse> abortCopyFromURLWithResponse(String copyId,
        LeaseAccessConditions leaseAccessConditions) {
    return withContext(context -> abortCopyFromURLWithResponse(copyId, leaseAccessConditions, context));
}

Mono<VoidResponse> abortCopyFromURLWithResponse(String copyId, LeaseAccessConditions leaseAccessConditions,
        Context context) {
    // Positional nulls are the generated REST client's optional parameters; order is load-bearing.
    return postProcessResponse(this.azureBlobStorage.blobs().abortCopyFromURLWithRestResponseAsync(
        null, null, copyId, null, null, leaseAccessConditions, context))
        .map(VoidResponse::new);
}

/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.copyFromURL}
 *
 * <p>For more information, see the Azure Docs (Copy Blob From URL REST operation).</p>
 *
 * @param copySource The source URL to copy from.
 * @return A reactive response containing the copy ID for the long running operation.
 */
public Mono<String> copyFromURL(URL copySource) {
    return copyFromURLWithResponse(copySource, null, null, null).flatMap(FluxUtil::toMono);
}

/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.copyFromURLWithResponse}
 *
 * <p>For more information, see the Azure Docs (Copy Blob From URL REST operation).</p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata {@link Metadata}
 * @param sourceModifiedAccessConditions {@link ModifiedAccessConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destAccessConditions {@link BlobAccessConditions} against the destination.
 * @return A reactive response containing the copy ID for the long running operation.
 */
public Mono<Response<String>> copyFromURLWithResponse(URL copySource, Metadata metadata,
        ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) {
    return withContext(context -> copyFromURLWithResponse(copySource, metadata, sourceModifiedAccessConditions,
        destAccessConditions, context));
}

Mono<Response<String>> copyFromURLWithResponse(URL copySource, Metadata metadata,
        ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions,
        Context context) {
    // Default all optional condition/metadata holders so the mapping below is null-safe.
    metadata = metadata == null ? new Metadata() : metadata;
    sourceModifiedAccessConditions = sourceModifiedAccessConditions == null
        ? new ModifiedAccessConditions() : sourceModifiedAccessConditions;
    destAccessConditions = destAccessConditions == null ? new BlobAccessConditions() : destAccessConditions;

    // The service distinguishes source-side conditions; repackage the generic modified conditions.
    SourceModifiedAccessConditions sourceConditions = new SourceModifiedAccessConditions()
        .sourceIfModifiedSince(sourceModifiedAccessConditions.ifModifiedSince())
        .sourceIfUnmodifiedSince(sourceModifiedAccessConditions.ifUnmodifiedSince())
        .sourceIfMatch(sourceModifiedAccessConditions.ifMatch())
        .sourceIfNoneMatch(sourceModifiedAccessConditions.ifNoneMatch());

    return postProcessResponse(this.azureBlobStorage.blobs().copyFromURLWithRestResponseAsync(
        null, null, copySource, null, metadata, null, null, null, sourceConditions,
        destAccessConditions.modifiedAccessConditions(), destAccessConditions.leaseAccessConditions(), context))
        .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId()));
}

/**
 * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient},
 * or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.download}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST operation).</p>
 *
 * @return A reactive response containing the blob data.
*/
public Mono<Flux<ByteBuffer>> download() {
    return downloadWithResponse(null, null, null, false).flatMap(FluxUtil::toMono);
}

/**
 * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link
 * PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.downloadWithResponse}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST operation).</p>
 *
 * @param range {@link BlobRange}
 * @param options {@link ReliableDownloadOptions}
 * @param accessConditions {@link BlobAccessConditions}
 * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned.
 * @return A reactive response containing the blob data.
 */
public Mono<Response<Flux<ByteBuffer>>> downloadWithResponse(BlobRange range, ReliableDownloadOptions options,
        BlobAccessConditions accessConditions, boolean rangeGetContentMD5) {
    return withContext(context ->
        downloadWithResponse(range, options, accessConditions, rangeGetContentMD5, context));
}

Mono<Response<Flux<ByteBuffer>>> downloadWithResponse(BlobRange range, ReliableDownloadOptions options,
        BlobAccessConditions accessConditions, boolean rangeGetContentMD5, Context context) {
    // An empty body is normalized to a single empty buffer so downstream consumers always see data.
    return download(range, accessConditions, rangeGetContentMD5, context)
        .map(response -> new SimpleResponse<>(
            response.rawResponse(),
            response.body(options).map(ByteBuf::nioBuffer).switchIfEmpty(Flux.just(ByteBuffer.allocate(0)))));
}

/**
 * Reads a range of bytes from a blob. The response also includes the blob's properties and metadata (see the
 * Azure Docs, Get Blob REST operation).
 * <p>
 * Note that the response body has reliable download functionality built in, meaning that a failed download
 * stream will be automatically retried. This behavior may be configured with {@link ReliableDownloadOptions}.
 *
 * @param range {@link BlobRange}
 * @param accessConditions {@link BlobAccessConditions}
 * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned.
 * @return Emits the successful response.
 */
Mono<DownloadAsyncResponse> download(BlobRange range, BlobAccessConditions accessConditions,
        boolean rangeGetContentMD5) {
    return withContext(context -> download(range, accessConditions, rangeGetContentMD5, context));
}

Mono<DownloadAsyncResponse> download(BlobRange range, BlobAccessConditions accessConditions,
        boolean rangeGetContentMD5, Context context) {
    range = range == null ? new BlobRange(0) : range;
    // The REST layer expects null (header omitted) rather than false.
    Boolean getMD5 = rangeGetContentMD5 ? rangeGetContentMD5 : null;
    accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions;
    // Bookkeeping used by the reliable-download retry callback below.
    HTTPGetterInfo info = new HTTPGetterInfo()
        .offset(range.offset())
        .count(range.count())
        .eTag(accessConditions.modifiedAccessConditions().ifMatch());

    return postProcessResponse(this.azureBlobStorage.blobs().downloadWithRestResponseAsync(
        null, null, snapshot, null, null, range.toHeaderValue(), getMD5, null, null, null, null, null,
        accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context))
        .map(response -> {
            // Pin the eTag from the first response so retries fail fast if the blob changes mid-download.
            info.eTag(response.deserializedHeaders().eTag());
            return new DownloadAsyncResponse(response, info,
                newInfo -> this.download(new BlobRange(newInfo.offset(), newInfo.count()),
                    new BlobAccessConditions().modifiedAccessConditions(
                        new ModifiedAccessConditions().ifMatch(info.eTag())), false, context));
        });
}

/**
 * Downloads the entire blob into a file specified by the path. The file will be created if it doesn't exist.
 * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link
 * AppendBlobClient}.
* <p> * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call, * use the other overload providing the {@link BlobRange} parameter. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param filePath A non-null {@link OutputStream} instance where the downloaded data will be written. * @return An empty response */ public Mono<Void> downloadToFile(String filePath) { return downloadToFile(filePath, null, BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE, null, null, false); } /** * Downloads a range of bytes blob into a file specified by the path. The file will be created if it doesn't exist. * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link * AppendBlobClient}. * <p> * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call, * provide the {@link BlobRange} parameter. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param filePath A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range {@link BlobRange} * @param blockSize the size of a chunk to download at a time, in bytes * @param options {@link ReliableDownloadOptions} * @param accessConditions {@link BlobAccessConditions} * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned. * @return An empty response * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB. * @throws UncheckedIOException If an I/O error occurs. 
*/ public Mono<Void> downloadToFile(String filePath, BlobRange range, Integer blockSize, ReliableDownloadOptions options, BlobAccessConditions accessConditions, boolean rangeGetContentMD5) { return withContext(context -> downloadToFile(filePath, range, blockSize, options, accessConditions, rangeGetContentMD5, context)); } Mono<Void> downloadToFile(String filePath, BlobRange range, Integer blockSize, ReliableDownloadOptions options, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, Context context) { if (blockSize < 0 || blockSize > BLOB_MAX_DOWNLOAD_BLOCK_SIZE) { throw new IllegalArgumentException("Block size should not exceed 100MB"); } return Mono.using(() -> downloadToFileResourceSupplier(filePath), channel -> Mono.justOrEmpty(range) .switchIfEmpty(getFullBlobRange(accessConditions)) .flatMapMany(rg -> Flux.fromIterable(sliceBlobRange(rg, blockSize))) .flatMap(chunk -> this.download(chunk, accessConditions, rangeGetContentMD5, context) .subscribeOn(Schedulers.elastic()) .flatMap(dar -> FluxUtil.bytebufStreamToFile(dar.body(options), channel, chunk.offset() - (range == null ? 
0 : range.offset())))) .then(), this::downloadToFileCleanup); } private AsynchronousFileChannel downloadToFileResourceSupplier(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw new UncheckedIOException(e); } } private void downloadToFileCleanup(AsynchronousFileChannel channel) { try { channel.close(); } catch (IOException e) { throw new UncheckedIOException(e); } } private Mono<BlobRange> getFullBlobRange(BlobAccessConditions accessConditions) { return getPropertiesWithResponse(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize())); } private List<BlobRange> sliceBlobRange(BlobRange blobRange, Integer blockSize) { if (blockSize == null) { blockSize = BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE; } long offset = blobRange.offset(); long length = blobRange.count(); List<BlobRange> chunks = new ArrayList<>(); for (long pos = offset; pos < offset + length; pos += blockSize) { long count = blockSize; if (pos + count > offset + length) { count = offset + length - pos; } chunks.add(new BlobRange(pos, count)); } return chunks; } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.delete} * * <p>For more information, see the * <a href="https: * * @return A reactive response signalling completion. */ public Mono<Void> delete() { return deleteWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.deleteWithResponse * * <p>For more information, see the * <a href="https: * * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. 
{@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is
 * being deleted, you must pass null.
 * @param accessConditions {@link BlobAccessConditions}
 * @return A reactive response signalling completion.
 */
public Mono<VoidResponse> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
        BlobAccessConditions accessConditions) {
    return withContext(context -> deleteWithResponse(deleteBlobSnapshotOptions, accessConditions, context));
}

Mono<VoidResponse> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
        BlobAccessConditions accessConditions, Context context) {
    accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions;

    // Positional nulls are the generated REST client's optional parameters; order is load-bearing.
    return postProcessResponse(this.azureBlobStorage.blobs().deleteWithRestResponseAsync(
        null, null, snapshot, null, null, deleteBlobSnapshotOptions, null,
        accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context))
        .map(VoidResponse::new);
}

/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.getProperties}
 *
 * <p>For more information, see the Azure Docs (Get Blob Properties REST operation).</p>
 *
 * @return A reactive response containing the blob properties and metadata.
 */
public Mono<BlobProperties> getProperties() {
    return getPropertiesWithResponse(null).flatMap(FluxUtil::toMono);
}

/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.getPropertiesWithResponse}
 *
 * <p>For more information, see the Azure Docs (Get Blob Properties REST operation).</p>
 *
 * @param accessConditions {@link BlobAccessConditions}
 * @return A reactive response containing the blob properties and metadata.
 */
public Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobAccessConditions accessConditions) {
    return withContext(context -> getPropertiesWithResponse(accessConditions, context));
}

Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobAccessConditions accessConditions, Context context) {
    accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions;

    return postProcessResponse(this.azureBlobStorage.blobs().getPropertiesWithRestResponseAsync(
        null, null, snapshot, null, null, null, null, null, null,
        accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context))
        .map(rb -> new SimpleResponse<>(rb, new BlobProperties(rb.deserializedHeaders())));
}

/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.setHTTPHeaders}
 *
 * <p>For more information, see the Azure Docs (Set Blob Properties REST operation).</p>
 *
 * @param headers {@link BlobHTTPHeaders}
 * @return A reactive response signalling completion.
 */
public Mono<Void> setHTTPHeaders(BlobHTTPHeaders headers) {
    return setHTTPHeadersWithResponse(headers, null).flatMap(FluxUtil::toMono);
}

/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.setHTTPHeadersWithResponse}
 *
 * <p>For more information, see the Azure Docs (Set Blob Properties REST operation).</p>
 *
 * @param headers {@link BlobHTTPHeaders}
 * @param accessConditions {@link BlobAccessConditions}
 * @return A reactive response signalling completion.
*/
public Mono<VoidResponse> setHTTPHeadersWithResponse(BlobHTTPHeaders headers,
        BlobAccessConditions accessConditions) {
    return withContext(context -> setHTTPHeadersWithResponse(headers, accessConditions, context));
}

Mono<VoidResponse> setHTTPHeadersWithResponse(BlobHTTPHeaders headers, BlobAccessConditions accessConditions,
        Context context) {
    accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions;

    return postProcessResponse(this.azureBlobStorage.blobs().setHTTPHeadersWithRestResponseAsync(
        null, null, null, null, headers,
        accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context))
        .map(VoidResponse::new);
}

/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.setMetadata}
 *
 * <p>For more information, see the Azure Docs (Set Blob Metadata REST operation).</p>
 *
 * @param metadata {@link Metadata}
 * @return A reactive response signalling completion.
 */
public Mono<Void> setMetadata(Metadata metadata) {
    return setMetadataWithResponse(metadata, null).flatMap(FluxUtil::toMono);
}

/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.setMetadataWithResponse}
 *
 * <p>For more information, see the Azure Docs (Set Blob Metadata REST operation).</p>
 *
 * @param metadata {@link Metadata}
 * @param accessConditions {@link BlobAccessConditions}
 * @return A reactive response signalling completion.
 */
public Mono<VoidResponse> setMetadataWithResponse(Metadata metadata, BlobAccessConditions accessConditions) {
    return withContext(context -> setMetadataWithResponse(metadata, accessConditions, context));
}

Mono<VoidResponse> setMetadataWithResponse(Metadata metadata, BlobAccessConditions accessConditions,
        Context context) {
    metadata = metadata == null ? new Metadata() : metadata;
    accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions;

    return postProcessResponse(this.azureBlobStorage.blobs().setMetadataWithRestResponseAsync(
        null, null, null, metadata, null, null, null, null,
        accessConditions.leaseAccessConditions(), null, accessConditions.modifiedAccessConditions(), context))
        .map(VoidResponse::new);
}

/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.createSnapshot}
 *
 * <p>For more information, see the Azure Docs (Snapshot Blob REST operation).</p>
 *
 * @return A response containing a {@link BlobAsyncClient} which is used to interact with the created snapshot.
 */
public Mono<BlobAsyncClient> createSnapshot() {
    return createSnapshotWithResponse(null, null).flatMap(FluxUtil::toMono);
}

/**
 * Creates a read-only snapshot of the blob.
*
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.createSnapshotWithResponse}
 *
 * <p>For more information, see the Azure Docs (Snapshot Blob REST operation).</p>
 *
 * @param metadata {@link Metadata}
 * @param accessConditions {@link BlobAccessConditions}
 * @return A response containing a {@link BlobAsyncClient} which is used to interact with the created snapshot.
 */
public Mono<Response<BlobAsyncClient>> createSnapshotWithResponse(Metadata metadata,
        BlobAccessConditions accessConditions) {
    return withContext(context -> createSnapshotWithResponse(metadata, accessConditions, context));
}

Mono<Response<BlobAsyncClient>> createSnapshotWithResponse(Metadata metadata,
        BlobAccessConditions accessConditions, Context context) {
    metadata = metadata == null ? new Metadata() : metadata;
    accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions;

    // The returned client targets the snapshot id reported in the response headers.
    return postProcessResponse(this.azureBlobStorage.blobs().createSnapshotWithRestResponseAsync(
        null, null, null, metadata, null, null, null, null, null,
        accessConditions.modifiedAccessConditions(), accessConditions.leaseAccessConditions(), context))
        .map(rb -> new SimpleResponse<>(rb, this.getSnapshotClient(rb.deserializedHeaders().snapshot())));
}

/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob
 * in a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth
 * of the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.setTier}
 *
 * <p>For more information, see the Azure Docs (Set Blob Tier REST operation).</p>
 *
 * @param tier The new tier for the blob.
 * @return A reactive response signalling completion.
 */
public Mono<Void> setTier(AccessTier tier) {
    return setTierWithResponse(tier, null).flatMap(FluxUtil::toMono);
}

/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob
 * in a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth
 * of the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.setTierWithResponse}
 *
 * <p>For more information, see the Azure Docs (Set Blob Tier REST operation).</p>
 *
 * @param tier The new tier for the blob.
 * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does
 * not match the active lease on the blob.
 * @return A reactive response signalling completion.
 */
public Mono<VoidResponse> setTierWithResponse(AccessTier tier, LeaseAccessConditions leaseAccessConditions) {
    return withContext(context -> setTierWithResponse(tier, leaseAccessConditions, context));
}

Mono<VoidResponse> setTierWithResponse(AccessTier tier, LeaseAccessConditions leaseAccessConditions,
        Context context) {
    // Tier is mandatory for this operation; the REST layer takes its own AccessTierRequired type.
    Utility.assertNotNull("tier", tier);
    AccessTierRequired accessTierRequired = AccessTierRequired.fromString(tier.toString());

    return postProcessResponse(this.azureBlobStorage.blobs().setTierWithRestResponseAsync(
        null, null, accessTierRequired, null, null, null, leaseAccessConditions, context))
        .map(VoidResponse::new);
}

/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted
 * snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobAsyncClient.undelete}
 *
 * <p>For more information, see the Azure Docs (Undelete Blob REST operation).</p>
 *
 * @return A reactive response signalling completion.
*/ public Mono<Void> undelete() { return undeleteWithResponse().flatMap(FluxUtil::toMono); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.undeleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A reactive response signalling completion. */ public Mono<VoidResponse> undeleteWithResponse() { return withContext(context -> undeleteWithResponse(context)); } Mono<VoidResponse> undeleteWithResponse(Context context) { return postProcessResponse(this.azureBlobStorage.blobs().undeleteWithRestResponseAsync(null, null, context)) .map(VoidResponse::new); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.acquireLease * * <p>For more information, see the * <a href="https: * * @param proposedId A {@code String} in any valid GUID format. May be null. * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A * non-infinite lease can be between 15 and 60 seconds. * @return A reactive response containing the lease ID. */ public Mono<String> acquireLease(String proposedId, int duration) { return acquireLeaseWithResponse(proposedId, duration, null).flatMap(FluxUtil::toMono); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.acquireLeaseWithResponse * * <p>For more information, see the * <a href="https: * * @param proposedId A {@code String} in any valid GUID format. May be null. 
* @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A * non-infinite lease can be between 15 and 60 seconds. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the lease ID. * @throws IllegalArgumentException If {@code duration} is outside the bounds of 15 to 60 or isn't -1. */ public Mono<Response<String>> acquireLeaseWithResponse(String proposedId, int duration, ModifiedAccessConditions modifiedAccessConditions) { return withContext(context -> acquireLeaseWithResponse(proposedId, duration, modifiedAccessConditions, context)); } Mono<Response<String>> acquireLeaseWithResponse(String proposedId, int duration, ModifiedAccessConditions modifiedAccessConditions, Context context) { if (!(duration == -1 || (duration >= 15 && duration <= 60))) { throw new IllegalArgumentException("Duration must be -1 or between 15 and 60."); } return postProcessResponse(this.azureBlobStorage.blobs().acquireLeaseWithRestResponseAsync( null, null, null, duration, proposedId, null, modifiedAccessConditions, context)) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Renews the blob's previously-acquired lease. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.renewLease * * <p>For more information, see the * <a href="https: * * @param leaseId The leaseId of the active lease on the blob. * @return A reactive response containing the renewed lease ID. */ public Mono<String> renewLease(String leaseId) { return renewLeaseWithResponse(leaseId, null).flatMap(FluxUtil::toMono); } /** * Renews the blob's previously-acquired lease. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.renewLeaseWithResponse * * <p>For more information, see the * <a href="https: * * @param leaseId The leaseId of the active lease on the blob. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLeaseWithResponse(String leaseId, ModifiedAccessConditions modifiedAccessConditions) { return withContext(context -> renewLeaseWithResponse(leaseId, modifiedAccessConditions, context)); } Mono<Response<String>> renewLeaseWithResponse(String leaseId, ModifiedAccessConditions modifiedAccessConditions, Context context) { return postProcessResponse(this.azureBlobStorage.blobs().renewLeaseWithRestResponseAsync(null, null, leaseId, null, null, modifiedAccessConditions, context)) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Releases the blob's previously-acquired lease. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.releaseLease * * <p>For more information, see the * <a href="https: * * @param leaseId The leaseId of the active lease on the blob. * @return A reactive response signalling completion. */ public Mono<Void> releaseLease(String leaseId) { return releaseLeaseWithResponse(leaseId, null).flatMap(FluxUtil::toMono); } /** * Releases the blob's previously-acquired lease. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.releaseLeaseWithResponse * * <p>For more information, see the * <a href="https: * * @param leaseId The leaseId of the active lease on the blob. 
* @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response signalling completion. */ public Mono<VoidResponse> releaseLeaseWithResponse(String leaseId, ModifiedAccessConditions modifiedAccessConditions) { return withContext(context -> releaseLeaseWithResponse(leaseId, modifiedAccessConditions, context)); } Mono<VoidResponse> releaseLeaseWithResponse(String leaseId, ModifiedAccessConditions modifiedAccessConditions, Context context) { return postProcessResponse(this.azureBlobStorage.blobs().releaseLeaseWithRestResponseAsync(null, null, leaseId, null, null, modifiedAccessConditions, context)) .map(VoidResponse::new); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.breakLease} * * <p>For more information, see the * <a href="https: * * @return A reactive response containing the remaining time in the broken lease in seconds. */ public Mono<Integer> breakLease() { return breakLeaseWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.breakLeaseWithResponse * * <p>For more information, see the * <a href="https: * * @param breakPeriodInSeconds An optional {@code Integer} representing the proposed duration of seconds that the * lease should continue before it is broken, between 0 and 60 seconds. This break period is only used if it is * shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease * will not be available before the break period has expired, but the lease may be held for longer than the break * period. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the remaining time in the broken lease in seconds. */ public Mono<Response<Integer>> breakLeaseWithResponse(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions) { return withContext(context -> breakLeaseWithResponse(breakPeriodInSeconds, modifiedAccessConditions, context)); } Mono<Response<Integer>> breakLeaseWithResponse(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions, Context context) { return postProcessResponse(this.azureBlobStorage.blobs().breakLeaseWithRestResponseAsync(null, null, null, breakPeriodInSeconds, null, modifiedAccessConditions, context)) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseTime())); } /** * ChangeLease changes the blob's lease ID. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.changeLease * * <p>For more information, see the * <a href="https: * * @param leaseId The leaseId of the active lease on the blob. 
* @param proposedId A {@code String} in any valid GUID format. * @return A reactive response containing the new lease ID. */ public Mono<String> changeLease(String leaseId, String proposedId) { return changeLeaseWithResponse(leaseId, proposedId, null).flatMap(FluxUtil::toMono); } /** * ChangeLease changes the blob's lease ID. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.changeLeaseWithResponse * * <p>For more information, see the * <a href="https: * * @param leaseId The leaseId of the active lease on the blob. * @param proposedId A {@code String} in any valid GUID format. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLeaseWithResponse(String leaseId, String proposedId, ModifiedAccessConditions modifiedAccessConditions) { return withContext(context -> changeLeaseWithResponse(leaseId, proposedId, modifiedAccessConditions, context)); } Mono<Response<String>> changeLeaseWithResponse(String leaseId, String proposedId, ModifiedAccessConditions modifiedAccessConditions, Context context) { return postProcessResponse(this.azureBlobStorage.blobs().changeLeaseWithRestResponseAsync(null, null, leaseId, proposedId, null, null, modifiedAccessConditions, context)) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Returns the sku name and account kind for the account. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.getAccountInfo} * * <p>For more information, see the * <a href="https: * * @return a reactor response containing the sku name and account kind. 
*/ public Mono<StorageAccountInfo> getAccountInfo() { return getAccountInfoWithResponse().flatMap(FluxUtil::toMono); } /** * Returns the sku name and account kind for the account. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.BlobAsyncClient.getAccountInfo} * * <p>For more information, see the * <a href="https: * * @return a reactor response containing the sku name and account kind. */ public Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse() { return withContext(context -> getAccountInfoWithResponse(context)); } Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse(Context context) { return postProcessResponse( this.azureBlobStorage.blobs().getAccountInfoWithRestResponseAsync(null, null, context)) .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders()))); } /** * Generates a user delegation SAS with the specified parameters * * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS * @param accountName The {@code String} account name for the SAS * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @return A string that represents the SAS token */ public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, BlobSASPermission permissions, OffsetDateTime expiryTime) { return this.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, null /* startTime */, null /* version */, null /*sasProtocol */, null /* ipRange */, null /* cacheControl */, null /*contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /* contentType */); } /** * Generates a user delegation SAS token with the specified parameters * * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS * @param accountName The {@code String} account name for the SAS * @param 
permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS * @param ipRange An optional {@code IPRange} ip address range for the SAS * @return A string that represents the SAS token */ public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, BlobSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange) { return this.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, startTime, version, sasProtocol, ipRange, null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /* contentType */); } /** * Generates a user delegation SAS token with the specified parameters * * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS * @param accountName The {@code String} account name for the SAS * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS * @param ipRange An optional {@code IPRange} ip address range for the SAS * @param cacheControl An optional {@code String} cache-control header for the SAS. * @param contentDisposition An optional {@code String} content-disposition header for the SAS. * @param contentEncoding An optional {@code String} content-encoding header for the SAS. 
* @param contentLanguage An optional {@code String} content-language header for the SAS. * @param contentType An optional {@code String} content-type header for the SAS. * @return A string that represents the SAS token */ public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, BlobSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange, String cacheControl, String contentDisposition, String contentEncoding, String contentLanguage, String contentType) { ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol, startTime, expiryTime, permissions == null ? null : permissions.toString(), ipRange, null /* identifier*/, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); ServiceSASSignatureValues values = configureServiceSASSignatureValues(serviceSASSignatureValues, accountName); SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(userDelegationKey); return sasQueryParameters.encode(); } /** * Generates a SAS token with the specified parameters * * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @return A string that represents the SAS token */ public String generateSAS(BlobSASPermission permissions, OffsetDateTime expiryTime) { return this.generateSAS(null, permissions, expiryTime, null /* startTime */, /* identifier */ null /* version */, null /* sasProtocol */, null /* ipRange */, null /* cacheControl */, null /* contentLanguage*/, null /* contentEncoding */, null /* contentLanguage */, null /* contentType */); } /** * Generates a SAS token with the specified parameters * * @param identifier The {@code String} name of the access policy on the container this SAS references if any * @return A string that represents the SAS token */ public String 
generateSAS(String identifier) { return this.generateSAS(identifier, null /* permissions */, null /* expiryTime */, null /* startTime */, null /* version */, null /* sasProtocol */, null /* ipRange */, null /* cacheControl */, null /* contentLanguage*/, null /* contentEncoding */, null /* contentLanguage */, null /* contentType */); } /** * Generates a SAS token with the specified parameters * * @param identifier The {@code String} name of the access policy on the container this SAS references if any * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS * @param ipRange An optional {@code IPRange} ip address range for the SAS * @return A string that represents the SAS token */ public String generateSAS(String identifier, BlobSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange) { return this.generateSAS(identifier, permissions, expiryTime, startTime, version, sasProtocol, ipRange, null /* cacheControl */, null /* contentLanguage*/, null /* contentEncoding */, null /* contentLanguage */, null /* contentType */); } /** * Generates a SAS token with the specified parameters * * @param identifier The {@code String} name of the access policy on the container this SAS references if any * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS * @param ipRange An optional {@code 
IPRange} ip address range for the SAS * @param cacheControl An optional {@code String} cache-control header for the SAS. * @param contentDisposition An optional {@code String} content-disposition header for the SAS. * @param contentEncoding An optional {@code String} content-encoding header for the SAS. * @param contentLanguage An optional {@code String} content-language header for the SAS. * @param contentType An optional {@code String} content-type header for the SAS. * @return A string that represents the SAS token */ public String generateSAS(String identifier, BlobSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange, String cacheControl, String contentDisposition, String contentEncoding, String contentLanguage, String contentType) { ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol, startTime, expiryTime, permissions == null ? null : permissions.toString(), ipRange, identifier, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); SharedKeyCredential sharedKeyCredential = Utility.getSharedKeyCredential(this.azureBlobStorage.getHttpPipeline()); Utility.assertNotNull("sharedKeyCredential", sharedKeyCredential); ServiceSASSignatureValues values = configureServiceSASSignatureValues(serviceSASSignatureValues, sharedKeyCredential.accountName()); SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(sharedKeyCredential); return sasQueryParameters.encode(); } /** * Sets serviceSASSignatureValues parameters dependent on the current blob type */ ServiceSASSignatureValues configureServiceSASSignatureValues(ServiceSASSignatureValues serviceSASSignatureValues, String accountName) { serviceSASSignatureValues.canonicalName(this.azureBlobStorage.getUrl(), accountName); serviceSASSignatureValues.snapshotId(getSnapshotId()); if (isSnapshot()) { 
serviceSASSignatureValues.resource(Constants.UrlConstants.SAS_BLOB_SNAPSHOT_CONSTANT); } else { serviceSASSignatureValues.resource(Constants.UrlConstants.SAS_BLOB_CONSTANT); } return serviceSASSignatureValues; } /** * Gets the snapshotId for a blob resource * * @return A string that represents the snapshotId of the snapshot blob */ public String getSnapshotId() { return this.snapshot; } /** * Determines if a blob is a snapshot * * @return A boolean that indicates if a blob is a snapshot */ public boolean isSnapshot() { return this.snapshot != null; } }
Would we want to make the mapping function a function on the class? ``` private PagedResponseBase<ServiceListContainersSegmentHeaders, ContainerItem> mapContainerListing(ServicesListContainersSegmentResponse response) { return new PagedResponseBase<>(response.request(), response.statusCode(), response.headers(), response.value().containerItems(), response.value().nextMarker(), response.deserializedHeaders()); } ```
public PagedFlux<ContainerItem> listContainers(ListContainersOptions options) { return new PagedFlux<>( () -> listContainersSegment(null, options) .map(response -> new PagedResponseBase<>( response.request(), response.statusCode(), response.headers(), response.value().containerItems(), response.value().nextMarker(), response.deserializedHeaders())), (marker) -> listContainersSegment(marker, options) .map(response -> new PagedResponseBase<>( response.request(), response.statusCode(), response.headers(), response.value().containerItems(), response.value().nextMarker(), response.deserializedHeaders()))); }
.map(response -> new PagedResponseBase<>(
public PagedFlux<ContainerItem> listContainers(ListContainersOptions options) { return listContainersWithOptionalTimeout(options, null); }
class BlobServiceAsyncClient { private final AzureBlobStorageImpl azureBlobStorage; /** * Package-private constructor for use by {@link BlobServiceClientBuilder}. * * @param azureBlobStorageBuilder the API client builder for blob storage API */ BlobServiceAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder) { this.azureBlobStorage = azureBlobStorageBuilder.build(); } /** * Initializes a {@link ContainerAsyncClient} object pointing to the specified container. This method does not * create a container. It simply constructs the URL to the container and offers access to methods relevant to * containers. * * @param containerName The name of the container to point to. * @return A {@link ContainerAsyncClient} object pointing to the specified container */ public ContainerAsyncClient getContainerAsyncClient(String containerName) { return new ContainerAsyncClient(new AzureBlobStorageBuilder() .url(Utility.appendToURLPath(getAccountUrl(), containerName).toString()) .pipeline(azureBlobStorage.getHttpPipeline())); } /** * Creates a new container within a storage account. If a container with the same name already exists, the operation * fails. For more information, see the * <a href="https: * * @param containerName Name of the container to create * @return A response containing a {@link ContainerAsyncClient} used to interact with the container created. */ public Mono<Response<ContainerAsyncClient>> createContainer(String containerName) { return createContainer(containerName, null, null); } /** * Creates a new container within a storage account. If a container with the same name already exists, the operation * fails. For more information, see the * <a href="https: * * @param containerName Name of the container to create * @param metadata {@link Metadata} * @param accessType Specifies how the data in this container is available to the public. See the * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. 
* @return A response containing a {@link ContainerAsyncClient} used to interact with the container created. */ public Mono<Response<ContainerAsyncClient>> createContainer(String containerName, Metadata metadata, PublicAccessType accessType) { ContainerAsyncClient containerAsyncClient = getContainerAsyncClient(containerName); return containerAsyncClient.create(metadata, accessType) .map(response -> new SimpleResponse<>(response, containerAsyncClient)); } /** * Deletes the specified container in the storage account. If the container doesn't exist the operation fails. For * more information see the <a href="https: * * @param containerName Name of the container to delete * @return A response containing status code and HTTP headers */ public Mono<VoidResponse> deleteContainer(String containerName) { return getContainerAsyncClient(containerName).delete(); } /** * Gets the URL of the storage account represented by this client. * * @return the URL. * @throws RuntimeException If the account URL is malformed. */ public URL getAccountUrl() { try { return new URL(azureBlobStorage.getUrl()); } catch (MalformedURLException e) { throw new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureBlobStorage.getUrl()), e); } } /** * Returns a reactive Publisher emitting all the containers in this account lazily as needed. For more information, * see the <a href="https: * * @return A reactive response emitting the list of containers. */ public PagedFlux<ContainerItem> listContainers() { return this.listContainers(new ListContainersOptions()); } /** * Returns a reactive Publisher emitting all the containers in this account lazily as needed. For more information, * see the <a href="https: * * @param options A {@link ListContainersOptions} which specifies what data should be returned by the service. * @return A reactive response emitting the list of containers. */ /* * Returns a Mono segment of containers starting from the specified Marker. 
* Use an empty marker to start enumeration from the beginning. Container names are returned in lexicographic order. * After getting a segment, process it, and then call ListContainers again (passing the the previously-returned * Marker) to get the next segment. For more information, see * the <a href="https: * * @param marker * Identifies the portion of the list to be returned with the next list operation. * This value is returned in the response of a previous list operation as the * ListContainersSegmentResponse.body().nextMarker(). Set to null to list the first segment. * @param options * A {@link ListContainersOptions} which specifies what data should be returned by the service. * * @return Emits the successful response. * * @apiNote * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list "Sample code for ServiceURL.listContainersSegment")] \n * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list_helper "Helper code for ServiceURL.listContainersSegment")] \n * For more samples, please see the [Samples file](%https: */ private Mono<ServicesListContainersSegmentResponse> listContainersSegment(String marker, ListContainersOptions options) { options = options == null ? new ListContainersOptions() : options; return postProcessResponse( this.azureBlobStorage.services().listContainersSegmentWithRestResponseAsync( options.prefix(), marker, options.maxResults(), options.details().toIncludeType(), null, null, Context.NONE)); } /** * Gets the properties of a storage account’s Blob service. For more information, see the * <a href="https: * * @return A reactive response containing the storage account properties. 
*/ public Mono<Response<StorageServiceProperties>> getProperties() { return postProcessResponse( this.azureBlobStorage.services().getPropertiesWithRestResponseAsync(null, null, Context.NONE)) .map(rb -> new SimpleResponse<>(rb, rb.value())); } /** * Sets properties for a storage account's Blob service endpoint. For more information, see the * <a href="https: * Note that setting the default service version has no effect when using this client because this client explicitly * sets the version header on each request, overriding the default. * * @param properties Configures the service. * @return A reactive response containing the storage account properties. */ public Mono<VoidResponse> setProperties(StorageServiceProperties properties) { return postProcessResponse( this.azureBlobStorage.services().setPropertiesWithRestResponseAsync(properties, null, null, Context.NONE)) .map(VoidResponse::new); } /** * Gets a user delegation key for use with this account's blob storage. Note: This method call is only valid when * using {@link TokenCredential} in this object's {@link HttpPipeline}. * * @param start Start time for the key's validity. Null indicates immediate start. * @param expiry Expiration of the key's validity. * @return A reactive response containing the user delegation key. * @throws IllegalArgumentException If {@code start} isn't null and is after {@code expiry}. */ public Mono<Response<UserDelegationKey>> getUserDelegationKey(OffsetDateTime start, OffsetDateTime expiry) { Utility.assertNotNull("expiry", expiry); if (start != null && !start.isBefore(expiry)) { throw new IllegalArgumentException("`start` must be null or a datetime before `expiry`."); } return postProcessResponse( this.azureBlobStorage.services().getUserDelegationKeyWithRestResponseAsync( new KeyInfo() .start(start == null ? 
"" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(start)) .expiry(Utility.ISO_8601_UTC_DATE_FORMATTER.format(expiry)), null, null, Context.NONE) ).map(rb -> new SimpleResponse<>(rb, rb.value())); } /** * Retrieves statistics related to replication for the Blob service. It is only available on the secondary location * endpoint when read-access geo-redundant replication is enabled for the storage account. For more information, see * the * <a href="https: * * @return A reactive response containing the storage account statistics. */ public Mono<Response<StorageServiceStats>> getStatistics() { return postProcessResponse( this.azureBlobStorage.services().getStatisticsWithRestResponseAsync(null, null, Context.NONE)) .map(rb -> new SimpleResponse<>(rb, rb.value())); } /** * Returns the sku name and account kind for the account. For more information, please see the * <a href="https: * * @return A reactive response containing the storage account info. */ public Mono<Response<StorageAccountInfo>> getAccountInfo() { return postProcessResponse(this.azureBlobStorage.services().getAccountInfoWithRestResponseAsync(Context.NONE)) .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders()))); } /** * Generates an account SAS token with the specified parameters * * @param accountSASService The {@code AccountSASService} services for the account SAS * @param accountSASResourceType An optional {@code AccountSASResourceType} resources for the account SAS * @param accountSASPermission The {@code AccountSASPermission} permission for the account SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the account SAS * @return A string that represents the SAS token */ public String generateAccountSAS(AccountSASService accountSASService, AccountSASResourceType accountSASResourceType, AccountSASPermission accountSASPermission, OffsetDateTime expiryTime) { return this.generateAccountSAS(accountSASService, accountSASResourceType, accountSASPermission, 
expiryTime, null /* startTime */, null /* version */, null /* ipRange */, null /* sasProtocol */); } /** * Generates an account SAS token with the specified parameters * * @param accountSASService The {@code AccountSASService} services for the account SAS * @param accountSASResourceType An optional {@code AccountSASResourceType} resources for the account SAS * @param accountSASPermission The {@code AccountSASPermission} permission for the account SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the account SAS * @param startTime The {@code OffsetDateTime} start time for the account SAS * @param version The {@code String} version for the account SAS * @param ipRange An optional {@code IPRange} ip address range for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS * @return A string that represents the SAS token */ public String generateAccountSAS(AccountSASService accountSASService, AccountSASResourceType accountSASResourceType, AccountSASPermission accountSASPermission, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, IPRange ipRange, SASProtocol sasProtocol) { AccountSASSignatureValues accountSASSignatureValues = new AccountSASSignatureValues(); accountSASSignatureValues.services(accountSASService == null ? null : accountSASService.toString()); accountSASSignatureValues.resourceTypes(accountSASResourceType == null ? null : accountSASResourceType.toString()); accountSASSignatureValues.permissions(accountSASPermission == null ? 
null : accountSASPermission.toString()); accountSASSignatureValues.expiryTime(expiryTime); accountSASSignatureValues.startTime(startTime); if (version != null) { accountSASSignatureValues.version(version); } accountSASSignatureValues.ipRange(ipRange); accountSASSignatureValues.protocol(sasProtocol); SharedKeyCredential sharedKeyCredential = Utility.getSharedKeyCredential(this.azureBlobStorage.getHttpPipeline()); SASQueryParameters sasQueryParameters = accountSASSignatureValues.generateSASQueryParameters(sharedKeyCredential); return sasQueryParameters.encode(); } }
class BlobServiceAsyncClient {
    private final ClientLogger logger = new ClientLogger(BlobServiceAsyncClient.class);

    private final AzureBlobStorageImpl azureBlobStorage;

    /**
     * Package-private constructor for use by {@link BlobServiceClientBuilder}.
     *
     * @param azureBlobStorage the API client for blob storage
     */
    BlobServiceAsyncClient(AzureBlobStorageImpl azureBlobStorage) {
        this.azureBlobStorage = azureBlobStorage;
    }

    /**
     * Initializes a {@link ContainerAsyncClient} object pointing to the specified container. This method does not
     * create a container; it simply constructs the URL to the container and offers access to methods relevant to
     * containers.
     *
     * @param containerName The name of the container to point to.
     * @return A {@link ContainerAsyncClient} object pointing to the specified container
     */
    public ContainerAsyncClient getContainerAsyncClient(String containerName) {
        return new ContainerAsyncClient(new AzureBlobStorageBuilder()
            .url(Utility.appendToURLPath(getAccountUrl(), containerName).toString())
            .pipeline(azureBlobStorage.getHttpPipeline())
            .build());
    }

    /**
     * Creates a new container within a storage account. If a container with the same name already exists, the
     * operation fails. See the Azure Docs for more information.
     *
     * @param containerName Name of the container to create
     * @return A {@link Mono} containing a {@link ContainerAsyncClient} used to interact with the container created.
     */
    public Mono<ContainerAsyncClient> createContainer(String containerName) {
        return createContainerWithResponse(containerName, null, null).flatMap(FluxUtil::toMono);
    }

    /**
     * Creates a new container within a storage account. If a container with the same name already exists, the
     * operation fails. See the Azure Docs for more information.
     *
     * @param containerName Name of the container to create
     * @param metadata {@link Metadata}
     * @param accessType Specifies how the data in this container is available to the public. See the
     * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
     * @return A {@link Mono} containing a {@link Response} whose value is a {@link ContainerAsyncClient} used to
     * interact with the container created.
     */
    public Mono<Response<ContainerAsyncClient>> createContainerWithResponse(String containerName, Metadata metadata,
            PublicAccessType accessType) {
        return withContext(context -> createContainerWithResponse(containerName, metadata, accessType, context));
    }

    Mono<Response<ContainerAsyncClient>> createContainerWithResponse(String containerName, Metadata metadata,
            PublicAccessType accessType, Context context) {
        ContainerAsyncClient containerAsyncClient = getContainerAsyncClient(containerName);

        return containerAsyncClient.createWithResponse(metadata, accessType, context)
            .map(response -> new SimpleResponse<>(response, containerAsyncClient));
    }

    /**
     * Deletes the specified container in the storage account. If the container doesn't exist the operation fails.
     * See the Azure Docs for more information.
     *
     * @param containerName Name of the container to delete
     * @return A {@link Mono} signalling completion
     */
    public Mono<Void> deleteContainer(String containerName) {
        return deleteContainerWithResponse(containerName).flatMap(FluxUtil::toMono);
    }

    /**
     * Deletes the specified container in the storage account. If the container doesn't exist the operation fails.
     * See the Azure Docs for more information.
     *
     * @param containerName Name of the container to delete
     * @return A {@link Mono} containing status code and HTTP headers
     */
    public Mono<VoidResponse> deleteContainerWithResponse(String containerName) {
        return withContext(context -> deleteContainerWithResponse(containerName, context));
    }

    Mono<VoidResponse> deleteContainerWithResponse(String containerName, Context context) {
        return getContainerAsyncClient(containerName).deleteWithResponse(null, context);
    }

    /**
     * Gets the URL of the storage account represented by this client.
     *
     * @return the URL.
     * @throws RuntimeException If the account URL is malformed.
     */
    public URL getAccountUrl() {
        try {
            return new URL(azureBlobStorage.getUrl());
        } catch (MalformedURLException e) {
            // BUGFIX: the format string previously concatenated getClass().getSimpleName() onto
            // "Invalid URL on %s: %s" with '+', leaving two specifiers but only one argument, which
            // throws MissingFormatArgumentException. Pass the class name as the first argument instead.
            throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s",
                getClass().getSimpleName(), azureBlobStorage.getUrl()), e));
        }
    }

    /**
     * Returns a reactive Publisher emitting all the containers in this account lazily as needed. See the Azure Docs
     * for more information.
     *
     * @return A reactive response emitting the list of containers.
     */
    public PagedFlux<ContainerItem> listContainers() {
        return this.listContainers(new ListContainersOptions());
    }

    /**
     * Returns a reactive Publisher emitting all the containers in this account lazily as needed. See the Azure Docs
     * for more information.
     *
     * @param options A {@link ListContainersOptions} which specifies what data should be returned by the service.
     * @return A reactive response emitting the list of containers.
     */
    public PagedFlux<ContainerItem> listContainers(ListContainersOptions options) {
        // No timeout is applied for the async listing; the sync client supplies one via
        // listContainersWithOptionalTimeout.
        return listContainersWithOptionalTimeout(options, null);
    }

    /*
     * Implementation for this paged listing operation, supporting an optional timeout provided by the synchronous
     * BlobServiceClient. Applies the given timeout to each Mono<ServicesListContainersSegmentResponse> backing the
     * PagedFlux.
     *
     * @param options A {@link ListContainersOptions} which specifies what data should be returned by the service.
     * @param timeout An optional timeout to be applied to the network asynchronous operations.
     * @return A reactive response emitting the list of containers.
     */
    PagedFlux<ContainerItem> listContainersWithOptionalTimeout(ListContainersOptions options, Duration timeout) {
        // One retrieval function serves both the first page (null marker) and continuation pages.
        Function<String, Mono<PagedResponse<ContainerItem>>> func =
            marker -> listContainersSegment(marker, options, timeout)
                .map(response -> new PagedResponseBase<>(
                    response.request(),
                    response.statusCode(),
                    response.headers(),
                    response.value().containerItems(),
                    response.value().nextMarker(),
                    response.deserializedHeaders()));

        return new PagedFlux<>(
            () -> func.apply(null),
            marker -> func.apply(marker));
    }

    /*
     * Returns a Mono segment of containers starting from the specified Marker. Use an empty marker to start
     * enumeration from the beginning. Container names are returned in lexicographic order. After getting a segment,
     * process it, and then call ListContainers again (passing the previously-returned Marker) to get the next
     * segment.
     *
     * @param marker Identifies the portion of the list to be returned with the next list operation. This value is
     * returned in the response of a previous list operation as ListContainersSegmentResponse.body().nextMarker().
     * Set to null to list the first segment.
     * @param options A {@link ListContainersOptions} which specifies what data should be returned by the service.
     * @param timeout An optional timeout applied to the network operation.
     * @return Emits the successful response.
     */
    private Mono<ServicesListContainersSegmentResponse> listContainersSegment(String marker,
            ListContainersOptions options, Duration timeout) {
        options = options == null ? new ListContainersOptions() : options;

        return postProcessResponse(Utility.applyOptionalTimeout(
            this.azureBlobStorage.services().listContainersSegmentWithRestResponseAsync(
                options.prefix(), marker, options.maxResults(), options.details().toIncludeType(), null,
                null, Context.NONE), timeout));
    }

    /**
     * Gets the properties of a storage account's Blob service. See the Azure Docs for more information.
     *
     * @return A reactive response containing the storage account properties.
     */
    public Mono<StorageServiceProperties> getProperties() {
        return getPropertiesWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Gets the properties of a storage account's Blob service. See the Azure Docs for more information.
     *
     * @return A {@link Mono} containing a {@link Response} whose value is the storage account properties.
     */
    public Mono<Response<StorageServiceProperties>> getPropertiesWithResponse() {
        return withContext(context -> getPropertiesWithResponse(context));
    }

    Mono<Response<StorageServiceProperties>> getPropertiesWithResponse(Context context) {
        return postProcessResponse(
            this.azureBlobStorage.services().getPropertiesWithRestResponseAsync(null, null, context))
            .map(rb -> new SimpleResponse<>(rb, rb.value()));
    }

    /**
     * Sets properties for a storage account's Blob service endpoint. See the Azure Docs for more information.
     * Note that setting the default service version has no effect when using this client because this client
     * explicitly sets the version header on each request, overriding the default.
     *
     * @param properties Configures the service.
     * @return A {@link Mono} signalling completion.
     */
    public Mono<Void> setProperties(StorageServiceProperties properties) {
        return setPropertiesWithResponse(properties).flatMap(FluxUtil::toMono);
    }

    /**
     * Sets properties for a storage account's Blob service endpoint. See the Azure Docs for more information.
     * Note that setting the default service version has no effect when using this client because this client
     * explicitly sets the version header on each request, overriding the default.
     *
     * @param properties Configures the service.
     * @return A {@link Mono} containing status code and HTTP headers.
     */
    public Mono<VoidResponse> setPropertiesWithResponse(StorageServiceProperties properties) {
        return withContext(context -> setPropertiesWithResponse(properties, context));
    }

    Mono<VoidResponse> setPropertiesWithResponse(StorageServiceProperties properties, Context context) {
        return postProcessResponse(
            this.azureBlobStorage.services().setPropertiesWithRestResponseAsync(properties, null, null, context))
            .map(VoidResponse::new);
    }

    /**
     * Sets properties for a storage account's Blob service endpoint.
     *
     * @param properties Configures the service.
     * @return A {@link Mono} containing status code and HTTP headers.
     * @deprecated The method name contains a typo; use {@link #setPropertiesWithResponse(StorageServiceProperties)}
     * instead. Retained for backward compatibility.
     */
    @Deprecated
    public Mono<VoidResponse> setPropertiesWithReponse(StorageServiceProperties properties) {
        return setPropertiesWithResponse(properties);
    }

    /**
     * @deprecated The method name contains a typo; use
     * {@link #setPropertiesWithResponse(StorageServiceProperties, Context)} instead.
     */
    @Deprecated
    Mono<VoidResponse> setPropertiesWithReponse(StorageServiceProperties properties, Context context) {
        return setPropertiesWithResponse(properties, context);
    }

    /**
     * Gets a user delegation key for use with this account's blob storage. Note: This method call is only valid when
     * using {@link TokenCredential} in this object's {@link HttpPipeline}.
     *
     * @param start Start time for the key's validity. Null indicates immediate start.
     * @param expiry Expiration of the key's validity.
     * @return A {@link Mono} containing the user delegation key.
     * @throws IllegalArgumentException If {@code start} isn't null and is after {@code expiry}.
     */
    public Mono<UserDelegationKey> getUserDelegationKey(OffsetDateTime start, OffsetDateTime expiry) {
        return withContext(context -> getUserDelegationKeyWithResponse(start, expiry, context))
            .flatMap(FluxUtil::toMono);
    }

    /**
     * Gets a user delegation key for use with this account's blob storage. Note: This method call is only valid when
     * using {@link TokenCredential} in this object's {@link HttpPipeline}.
     *
     * @param start Start time for the key's validity. Null indicates immediate start.
     * @param expiry Expiration of the key's validity.
     * @return A {@link Mono} containing a {@link Response} whose value is the user delegation key.
     * @throws IllegalArgumentException If {@code start} isn't null and is after {@code expiry}.
     */
    public Mono<Response<UserDelegationKey>> getUserDelegationKeyWithResponse(OffsetDateTime start,
            OffsetDateTime expiry) {
        return withContext(context -> getUserDelegationKeyWithResponse(start, expiry, context));
    }

    Mono<Response<UserDelegationKey>> getUserDelegationKeyWithResponse(OffsetDateTime start, OffsetDateTime expiry,
            Context context) {
        Utility.assertNotNull("expiry", expiry);
        if (start != null && !start.isBefore(expiry)) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("`start` must be null or a datetime before `expiry`."));
        }

        return postProcessResponse(
            this.azureBlobStorage.services().getUserDelegationKeyWithRestResponseAsync(
                new KeyInfo()
                    .start(start == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(start))
                    .expiry(Utility.ISO_8601_UTC_DATE_FORMATTER.format(expiry)),
                null, null, context)
        ).map(rb -> new SimpleResponse<>(rb, rb.value()));
    }

    /**
     * Retrieves statistics related to replication for the Blob service. It is only available on the secondary
     * location endpoint when read-access geo-redundant replication is enabled for the storage account. See the
     * Azure Docs for more information.
     *
     * @return A {@link Mono} containing the storage account statistics.
     */
    public Mono<StorageServiceStats> getStatistics() {
        return getStatisticsWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Retrieves statistics related to replication for the Blob service. It is only available on the secondary
     * location endpoint when read-access geo-redundant replication is enabled for the storage account. See the
     * Azure Docs for more information.
     *
     * @return A {@link Mono} containing a {@link Response} whose value is the storage account statistics.
     */
    public Mono<Response<StorageServiceStats>> getStatisticsWithResponse() {
        return withContext(context -> getStatisticsWithResponse(context));
    }

    Mono<Response<StorageServiceStats>> getStatisticsWithResponse(Context context) {
        return postProcessResponse(
            this.azureBlobStorage.services().getStatisticsWithRestResponseAsync(null, null, context))
            .map(rb -> new SimpleResponse<>(rb, rb.value()));
    }

    /**
     * Returns the sku name and account kind for the account. See the Azure Docs for more information.
     *
     * @return A {@link Mono} containing the storage account info.
     */
    public Mono<StorageAccountInfo> getAccountInfo() {
        return getAccountInfoWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Returns the sku name and account kind for the account. See the Azure Docs for more information.
     *
     * @return A {@link Mono} containing a {@link Response} whose value is the storage account info.
     */
    public Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse() {
        return withContext(context -> getAccountInfoWithResponse(context));
    }

    Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse(Context context) {
        return postProcessResponse(this.azureBlobStorage.services().getAccountInfoWithRestResponseAsync(context))
            .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders())));
    }

    /**
     * Generates an account SAS token with the specified parameters.
     *
     * @param accountSASService The {@code AccountSASService} services for the account SAS
     * @param accountSASResourceType An optional {@code AccountSASResourceType} resources for the account SAS
     * @param accountSASPermission The {@code AccountSASPermission} permission for the account SAS
     * @param expiryTime The {@code OffsetDateTime} expiry time for the account SAS
     * @return A string that represents the SAS token
     */
    public String generateAccountSAS(AccountSASService accountSASService,
            AccountSASResourceType accountSASResourceType, AccountSASPermission accountSASPermission,
            OffsetDateTime expiryTime) {
        return this.generateAccountSAS(accountSASService, accountSASResourceType, accountSASPermission, expiryTime,
            null /* startTime */, null /* version */, null /* ipRange */, null /* sasProtocol */);
    }

    /**
     * Generates an account SAS token with the specified parameters.
     *
     * @param accountSASService The {@code AccountSASService} services for the account SAS
     * @param accountSASResourceType An optional {@code AccountSASResourceType} resources for the account SAS
     * @param accountSASPermission The {@code AccountSASPermission} permission for the account SAS
     * @param expiryTime The {@code OffsetDateTime} expiry time for the account SAS
     * @param startTime The {@code OffsetDateTime} start time for the account SAS
     * @param version The {@code String} version for the account SAS
     * @param ipRange An optional {@code IPRange} ip address range for the SAS
     * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS
     * @return A string that represents the SAS token
     */
    public String generateAccountSAS(AccountSASService accountSASService,
            AccountSASResourceType accountSASResourceType, AccountSASPermission accountSASPermission,
            OffsetDateTime expiryTime, OffsetDateTime startTime, String version, IPRange ipRange,
            SASProtocol sasProtocol) {
        AccountSASSignatureValues accountSASSignatureValues = new AccountSASSignatureValues();

        // Null components are passed through as null so the signature omits them.
        accountSASSignatureValues.services(accountSASService == null ? null : accountSASService.toString());
        accountSASSignatureValues.resourceTypes(
            accountSASResourceType == null ? null : accountSASResourceType.toString());
        accountSASSignatureValues.permissions(accountSASPermission == null ? null : accountSASPermission.toString());
        accountSASSignatureValues.expiryTime(expiryTime);
        accountSASSignatureValues.startTime(startTime);

        if (version != null) {
            accountSASSignatureValues.version(version);
        }

        accountSASSignatureValues.ipRange(ipRange);
        accountSASSignatureValues.protocol(sasProtocol);

        // Signing requires the account's shared key; extract it from the pipeline's credential policy.
        SharedKeyCredential sharedKeyCredential =
            Utility.getSharedKeyCredential(this.azureBlobStorage.getHttpPipeline());

        SASQueryParameters sasQueryParameters =
            accountSASSignatureValues.generateSASQueryParameters(sharedKeyCredential);

        return sasQueryParameters.encode();
    }
}
Same question as before about hoisting this response-mapping lambda into a class-level method; doing so would collapse all four of these duplicated blocks into a single shared implementation.
/**
 * Returns a reactive Publisher emitting all the blobs in this container lazily as needed (flat listing).
 *
 * @param options A {@link ListBlobsOptions} which specifies what data should be returned by the service.
 * @return A reactive response emitting the flattened blobs.
 */
public PagedFlux<BlobItem> listBlobsFlat(ListBlobsOptions options) {
    // The original duplicated this response-to-page mapping verbatim in both the first-page
    // supplier and the continuation function. A single retrieval function (the same pattern
    // used by listContainersWithOptionalTimeout) removes the duplication.
    Function<String, Mono<PagedResponse<BlobItem>>> func = marker ->
        listBlobsFlatSegment(marker, options)
            .map(response -> new PagedResponseBase<>(
                response.request(),
                response.statusCode(),
                response.headers(),
                response.value().segment().blobItems(),
                response.value().nextMarker(),
                response.deserializedHeaders()));

    return new PagedFlux<>(
        () -> func.apply(null),
        marker -> func.apply(marker));
}
.map(response -> new PagedResponseBase<>(
/**
 * Returns a reactive Publisher emitting all the blobs in this container lazily as needed (flat listing).
 *
 * @param options A {@link ListBlobsOptions} which specifies what data should be returned by the service.
 * @return A reactive response emitting the flattened blobs.
 */
public PagedFlux<BlobItem> listBlobsFlat(ListBlobsOptions options) {
    // Delegate to the timeout-aware implementation; the async client applies no timeout itself.
    final Duration noTimeout = null;
    return listBlobsFlatWithOptionalTimeout(options, noTimeout);
}
class ContainerAsyncClient { public static final String ROOT_CONTAINER_NAME = "$root"; public static final String STATIC_WEBSITE_CONTAINER_NAME = "$web"; public static final String LOG_CONTAINER_NAME = "$logs"; private final AzureBlobStorageImpl azureBlobStorage; /** * Package-private constructor for use by {@link ContainerClientBuilder}. * * @param azureBlobStorageBuilder the API client builder for blob storage API */ ContainerAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder) { this.azureBlobStorage = azureBlobStorageBuilder.build(); } /** * Creates a new {@link BlockBlobAsyncClient} object by concatenating the blobName to the end of * ContainerAsyncClient's URL. The new BlockBlobAsyncClient uses the same request policy pipeline as the * ContainerAsyncClient. To change the pipeline, create the BlockBlobAsyncClient and then call its WithPipeline * method passing in the desired pipeline object. Or, call this package's NewBlockBlobAsyncClient instead of calling * this object's NewBlockBlobAsyncClient method. * * @param blobName A {@code String} representing the name of the blob. * @return A new {@link BlockBlobAsyncClient} object which references the blob with the specified name in this * container. */ public BlockBlobAsyncClient getBlockBlobAsyncClient(String blobName) { return getBlockBlobAsyncClient(blobName, null); } /** * Creates a new {@link BlockBlobAsyncClient} object by concatenating the blobName to the end of * ContainerAsyncClient's URL. The new BlockBlobAsyncClient uses the same request policy pipeline as the * ContainerAsyncClient. To change the pipeline, create the BlockBlobAsyncClient and then call its WithPipeline * method passing in the desired pipeline object. Or, call this package's NewBlockBlobAsyncClient instead of calling * this object's NewBlockBlobAsyncClient method. * * @param blobName A {@code String} representing the name of the blob. * @param snapshot the snapshot identifier for the blob. 
* @return A new {@link BlockBlobAsyncClient} object which references the blob with the specified name in this * container. */ public BlockBlobAsyncClient getBlockBlobAsyncClient(String blobName, String snapshot) { return new BlockBlobAsyncClient(new AzureBlobStorageBuilder() .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString()) .pipeline(azureBlobStorage.getHttpPipeline()), snapshot); } /** * Creates creates a new PageBlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's * URL. The new PageBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change the * pipeline, create the PageBlobAsyncClient and then call its WithPipeline method passing in the desired pipeline * object. Or, call this package's NewPageBlobAsyncClient instead of calling this object's NewPageBlobAsyncClient * method. * * @param blobName A {@code String} representing the name of the blob. * @return A new {@link PageBlobAsyncClient} object which references the blob with the specified name in this * container. */ public PageBlobAsyncClient getPageBlobAsyncClient(String blobName) { return getPageBlobAsyncClient(blobName, null); } /** * Creates creates a new PageBlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's * URL. The new PageBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change the * pipeline, create the PageBlobAsyncClient and then call its WithPipeline method passing in the desired pipeline * object. Or, call this package's NewPageBlobAsyncClient instead of calling this object's NewPageBlobAsyncClient * method. * * @param blobName A {@code String} representing the name of the blob. * @param snapshot the snapshot identifier for the blob. * @return A new {@link PageBlobAsyncClient} object which references the blob with the specified name in this * container. 
*/ public PageBlobAsyncClient getPageBlobAsyncClient(String blobName, String snapshot) { return new PageBlobAsyncClient(new AzureBlobStorageBuilder() .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString()) .pipeline(azureBlobStorage.getHttpPipeline()), snapshot); } /** * Creates creates a new AppendBlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's * URL. The new AppendBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change * the pipeline, create the AppendBlobAsyncClient and then call its WithPipeline method passing in the desired * pipeline object. Or, call this package's NewAppendBlobAsyncClient instead of calling this object's * NewAppendBlobAsyncClient method. * * @param blobName A {@code String} representing the name of the blob. * @return A new {@link AppendBlobAsyncClient} object which references the blob with the specified name in this * container. */ public AppendBlobAsyncClient getAppendBlobAsyncClient(String blobName) { return getAppendBlobAsyncClient(blobName, null); } /** * Creates creates a new AppendBlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's * URL. The new AppendBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change * the pipeline, create the AppendBlobAsyncClient and then call its WithPipeline method passing in the desired * pipeline object. Or, call this package's NewAppendBlobAsyncClient instead of calling this object's * NewAppendBlobAsyncClient method. * * @param blobName A {@code String} representing the name of the blob. * @param snapshot the snapshot identifier for the blob. * @return A new {@link AppendBlobAsyncClient} object which references the blob with the specified name in this * container. 
*/ public AppendBlobAsyncClient getAppendBlobAsyncClient(String blobName, String snapshot) { return new AppendBlobAsyncClient(new AzureBlobStorageBuilder() .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString()) .pipeline(azureBlobStorage.getHttpPipeline()), snapshot); } /** * Creates a new BlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's URL. The new * BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change the pipeline, create * the BlobAsyncClient and then call its WithPipeline method passing in the desired pipeline object. Or, call this * package's getBlobAsyncClient instead of calling this object's getBlobAsyncClient method. * * @param blobName A {@code String} representing the name of the blob. * @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this container. */ public BlobAsyncClient getBlobAsyncClient(String blobName) { return getBlobAsyncClient(blobName, null); } /** * Creates a new BlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's URL. The new * BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change the pipeline, create * the BlobAsyncClient and then call its WithPipeline method passing in the desired pipeline object. Or, call this * package's getBlobAsyncClient instead of calling this object's getBlobAsyncClient method. * * @param blobName A {@code String} representing the name of the blob. * @param snapshot the snapshot identifier for the blob. * @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this container. 
*/ public BlobAsyncClient getBlobAsyncClient(String blobName, String snapshot) { return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString()) .pipeline(azureBlobStorage.getHttpPipeline()), snapshot); } /** * Initializes a {@link BlobServiceAsyncClient} object pointing to the storage account this container is in. * * @return A {@link BlobServiceAsyncClient} object pointing to the specified storage account */ public BlobServiceAsyncClient getBlobServiceAsyncClient() { return new BlobServiceAsyncClient(new AzureBlobStorageBuilder() .url(Utility.stripLastPathSegment(getContainerUrl()).toString()) .pipeline(azureBlobStorage.getHttpPipeline())); } /** * Gets the URL of the container represented by this client. * * @return the URL. * @throws RuntimeException If the container has a malformed URL. */ public URL getContainerUrl() { try { return new URL(azureBlobStorage.getUrl()); } catch (MalformedURLException e) { throw new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureBlobStorage.getUrl()), e); } } /** * Gets if the container this client represents exists in the cloud. * * @return true if the container exists, false if it doesn't */ public Mono<Response<Boolean>> exists() { return this.getProperties(null) .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true)) .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> { HttpResponse response = ((StorageException) t).response(); return Mono.just(new SimpleResponse<>(response.request(), response.statusCode(), response.headers(), false)); }); } /** * Creates a new container within a storage account. If a container with the same name already exists, the operation * fails. For more information, see the * <a href="https: * * @return A reactive response signalling completion. 
 */
    public Mono<VoidResponse> create() {
        return this.create(null, null);
    }

    /**
     * Creates a new container within a storage account. If a container with the same name already exists, the
     * operation fails. For more information, see the Azure Docs.
     *
     * @param metadata {@link Metadata}
     * @param accessType Specifies how the data in this container is available to the public. See the
     * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> create(Metadata metadata, PublicAccessType accessType) {
        // Normalize to an empty Metadata so the generated layer never sees a null map.
        metadata = metadata == null ? new Metadata() : metadata;

        return postProcessResponse(this.azureBlobStorage.containers().createWithRestResponseAsync(
            null, null, metadata, accessType, null, Context.NONE))
            .map(VoidResponse::new);
    }

    /**
     * Marks the specified container for deletion. The container and any blobs contained within it are later
     * deleted during garbage collection. For more information, see the Azure Docs.
     *
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> delete() {
        return this.delete(null);
    }

    /**
     * Marks the specified container for deletion. The container and any blobs contained within it are later
     * deleted during garbage collection. For more information, see the Azure Docs.
     *
     * @param accessConditions {@link ContainerAccessConditions}
     * @return A reactive response signalling completion.
     * @throws UnsupportedOperationException If the access conditions contain ETag (If-Match / If-None-Match)
     * conditions, which this service operation does not support.
     */
    public Mono<VoidResponse> delete(ContainerAccessConditions accessConditions) {
        accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions;

        // The Delete Container REST operation does not accept ETag conditions; fail fast client-side.
        if (!validateNoEtag(accessConditions.modifiedAccessConditions())) {
            throw new UnsupportedOperationException("ETag access conditions are not supported for this API.");
        }

        return postProcessResponse(this.azureBlobStorage.containers()
            .deleteWithRestResponseAsync(null, null, null, accessConditions.leaseAccessConditions(),
                accessConditions.modifiedAccessConditions(), Context.NONE))
            .map(VoidResponse::new);
    }

    /**
     * Returns the container's metadata and system properties. For more information, see the Azure Docs.
     *
     * @return A reactive response containing the container properties.
     */
    public Mono<Response<ContainerProperties>> getProperties() {
        return this.getProperties(null);
    }

    /**
     * Returns the container's metadata and system properties. For more information, see the Azure Docs.
     *
     * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease
     * does not match the active lease on the blob.
     * @return A reactive response containing the container properties.
     */
    public Mono<Response<ContainerProperties>> getProperties(LeaseAccessConditions leaseAccessConditions) {
        return postProcessResponse(this.azureBlobStorage.containers()
            .getPropertiesWithRestResponseAsync(null, null, null, leaseAccessConditions, Context.NONE))
            .map(rb -> new SimpleResponse<>(rb, new ContainerProperties(rb.deserializedHeaders())));
    }

    /**
     * Sets the container's metadata. For more information, see the Azure Docs.
     *
     * @param metadata {@link Metadata}
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> setMetadata(Metadata metadata) {
        return this.setMetadata(metadata, null);
    }

    /**
     * Sets the container's metadata. For more information, see the Azure Docs.
     *
     * @param metadata {@link Metadata}
     * @param accessConditions {@link ContainerAccessConditions}
     * @return A reactive response signalling completion.
     * @throws UnsupportedOperationException If {@link ContainerAccessConditions} contains anything set other
     * than the If-Modified-Since condition of {@link ModifiedAccessConditions}.
     */
    public Mono<VoidResponse> setMetadata(Metadata metadata, ContainerAccessConditions accessConditions) {
        metadata = metadata == null ? new Metadata() : metadata;
        accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions;

        // Set Container Metadata only honors If-Modified-Since; reject ETag and If-Unmodified-Since
        // conditions client-side.
        // NOTE(review): the second operand dereferences modifiedAccessConditions() without a null check;
        // this assumes ContainerAccessConditions initializes it to a non-null default — confirm.
        if (!validateNoEtag(accessConditions.modifiedAccessConditions())
            || accessConditions.modifiedAccessConditions().ifUnmodifiedSince() != null) {
            throw new UnsupportedOperationException(
                "If-Modified-Since is the only HTTP access condition supported for this API");
        }

        return postProcessResponse(this.azureBlobStorage.containers()
            .setMetadataWithRestResponseAsync(null, null, metadata, null,
                accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(),
                Context.NONE))
            .map(VoidResponse::new);
    }

    /**
     * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed
     * publicly. For more information, see the Azure Docs.
     *
     * @return A reactive response containing the container access policy.
     */
    public Mono<Response<ContainerAccessPolicies>> getAccessPolicy() {
        return this.getAccessPolicy(null);
    }

    /**
     * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed
     * publicly. For more information, see the Azure Docs.
     *
     * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease
     * does not match the active lease on the blob.
     * @return A reactive response containing the container access policy.
 */
    public Mono<Response<ContainerAccessPolicies>> getAccessPolicy(LeaseAccessConditions leaseAccessConditions) {
        return postProcessResponse(this.azureBlobStorage.containers().getAccessPolicyWithRestResponseAsync(null,
            null, null, leaseAccessConditions, Context.NONE)
            .map(response -> new SimpleResponse<>(response,
                new ContainerAccessPolicies(response.deserializedHeaders().blobPublicAccess(),
                    response.value()))));
    }

    /**
     * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed
     * publicly. Note that, for each signed identifier, we will truncate the start and expiry times to the
     * nearest second to ensure the time formatting is compatible with the service. For more information, see
     * the Azure Docs.
     *
     * @param accessType Specifies how the data in this container is available to the public. See the
     * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
     * @param identifiers A list of {@link SignedIdentifier} objects that specify the permissions for the
     * container. Passing null will clear all access policies.
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> setAccessPolicy(PublicAccessType accessType, List<SignedIdentifier> identifiers) {
        return this.setAccessPolicy(accessType, identifiers, null);
    }

    /**
     * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed
     * publicly. Note that, for each signed identifier, we will truncate the start and expiry times to the
     * nearest second to ensure the time formatting is compatible with the service. For more information, see
     * the Azure Docs.
     *
     * @param accessType Specifies how the data in this container is available to the public. See the
     * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
     * @param identifiers A list of {@link SignedIdentifier} objects that specify the permissions for the
     * container. Passing null will clear all access policies.
     * @param accessConditions {@link ContainerAccessConditions}
     * @return A reactive response signalling completion.
     * @throws UnsupportedOperationException If the access conditions contain ETag (If-Match / If-None-Match)
     * conditions, which this service operation does not support.
     */
    public Mono<VoidResponse> setAccessPolicy(PublicAccessType accessType, List<SignedIdentifier> identifiers,
        ContainerAccessConditions accessConditions) {
        accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions;

        if (!validateNoEtag(accessConditions.modifiedAccessConditions())) {
            throw new UnsupportedOperationException("ETag access conditions are not supported for this API.");
        }

        /*
        We truncate to seconds because the service only supports nanoseconds or seconds, but doing an
        OffsetDateTime.now will only give back milliseconds (more precise fields are zeroed and not serialized).
        This allows for proper serialization with no real detriment to users as sub-second precision on active
        time for signed identifiers is not really necessary.
         */
        if (identifiers != null) {
            for (SignedIdentifier identifier : identifiers) {
                if (identifier.accessPolicy() != null && identifier.accessPolicy().start() != null) {
                    identifier.accessPolicy().start(
                        identifier.accessPolicy().start().truncatedTo(ChronoUnit.SECONDS));
                }
                if (identifier.accessPolicy() != null && identifier.accessPolicy().expiry() != null) {
                    identifier.accessPolicy().expiry(
                        identifier.accessPolicy().expiry().truncatedTo(ChronoUnit.SECONDS));
                }
            }
        }

        return postProcessResponse(this.azureBlobStorage.containers()
            .setAccessPolicyWithRestResponseAsync(null, identifiers, null, accessType, null,
                accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(),
                Context.NONE))
            .map(VoidResponse::new);
    }

    /**
     * Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories
     * are flattened and only actual blobs and no directories are returned.
     *
     * <p>
     * Blob names are returned in lexicographic order. For more information, see the Azure Docs.
     *
     * <p>
     * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob
     * on the root level 'bar', will return
     *
     * <ul>
     * <li>foo/foo1
     * <li>foo/foo2
     * <li>bar
     * </ul>
     *
     * @return A reactive response emitting the flattened blobs.
     */
    public PagedFlux<BlobItem> listBlobsFlat() {
        return this.listBlobsFlat(new ListBlobsOptions());
    }

    /**
     * Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories
     * are flattened and only actual blobs and no directories are returned.
     *
     * <p>
     * Blob names are returned in lexicographic order. For more information, see the Azure Docs.
     *
     * <p>
     * E.g.
listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the * root level 'bar', will return * * <ul> * <li>foo/foo1 * <li>foo/foo2 * <li>bar * </ul> * * @param options {@link ListBlobsOptions} * @return A reactive response emitting the listed blobs, flattened. */ /* * Returns a single segment of blobs starting from the specified Marker. Use an empty * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned * Marker) to get the next segment. For more information, see the * <a href="https: * * @param marker * Identifies the portion of the list to be returned with the next list operation. * This value is returned in the response of a previous list operation as the * ListBlobsFlatSegmentResponse.body().nextMarker(). Set to null to list the first segment. * @param options * {@link ListBlobsOptions} * * @return Emits the successful response. * * @apiNote * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat "Sample code for ContainerAsyncClient.listBlobsFlatSegment")] \n * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat_helper "helper code for ContainerAsyncClient.listBlobsFlatSegment")] \n * For more samples, please see the [Samples file](%https: */ private Mono<ContainersListBlobFlatSegmentResponse> listBlobsFlatSegment(String marker, ListBlobsOptions options) { options = options == null ? 
new ListBlobsOptions() : options; return postProcessResponse(this.azureBlobStorage.containers() .listBlobFlatSegmentWithRestResponseAsync(null, options.prefix(), marker, options.maxResults(), options.details().toList(), null, null, Context.NONE)); } /** * Returns a reactive Publisher emitting all the blobs and directories (prefixes) under the given directory * (prefix). Directories will have {@link BlobItem * * <p> * Blob names are returned in lexicographic order. For more information, see the * <a href="https: * * <p> * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the * root level 'bar', will return the following results when prefix=null: * * <ul> * <li>foo/ (isPrefix = true) * <li>bar (isPrefix = false) * </ul> * <p> * will return the following results when prefix="foo/": * * <ul> * <li>foo/foo1 (isPrefix = false) * <li>foo/foo2 (isPrefix = false) * </ul> * * @param directory The directory to list blobs underneath * @return A reactive response emitting the prefixes and blobs. */ public PagedFlux<BlobItem> listBlobsHierarchy(String directory) { return this.listBlobsHierarchy("/", new ListBlobsOptions().prefix(directory)); } /** * Returns a reactive Publisher emitting all the blobs and prefixes (directories) under the given prefix * (directory). Directories will have {@link BlobItem * * <p> * Blob names are returned in lexicographic order. For more information, see the * <a href="https: * * <p> * E.g. 
 listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the
     * root level 'bar', will return the following results when prefix=null:
     *
     * <ul>
     * <li>foo/ (isPrefix = true)
     * <li>bar (isPrefix = false)
     * </ul>
     * <p>
     * will return the following results when prefix="foo/":
     *
     * <ul>
     * <li>foo/foo1 (isPrefix = false)
     * <li>foo/foo2 (isPrefix = false)
     * </ul>
     *
     * @param delimiter The delimiter for blob hierarchy, "/" for hierarchy based on directories
     * @param options {@link ListBlobsOptions}
     * @return A reactive response emitting the prefixes and blobs.
     */
    public PagedFlux<BlobItem> listBlobsHierarchy(String delimiter, ListBlobsOptions options) {
        // Lazily pages through the listing: the first lambda fetches the first segment, the second
        // fetches subsequent segments from the continuation marker.
        return new PagedFlux<>(
            () -> listBlobsHierarchySegment(null, delimiter, options)
                .map(response -> new PagedResponseBase<>(
                    response.request(),
                    response.statusCode(),
                    response.headers(),
                    response.value().segment().blobItems(),
                    response.value().nextMarker(),
                    response.deserializedHeaders())),
            (marker) -> listBlobsHierarchySegment(marker, delimiter, options)
                .map(response -> new PagedResponseBase<>(
                    response.request(),
                    response.statusCode(),
                    response.headers(),
                    response.value().segment().blobItems(),
                    response.value().nextMarker(),
                    response.deserializedHeaders())));
    }

    /*
     * Returns a single segment of blobs and blob prefixes starting from the specified marker. Use a null
     * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. After
     * getting a segment, process it, then call this again with the previously-returned nextMarker to get the
     * next segment.
     *
     * @param marker Identifies the portion of the list to be returned with the next list operation. Set to
     * null to list the first segment.
     * @param delimiter The service returns a BlobPrefix element as a placeholder for all blobs whose names
     * begin with the same substring up to the appearance of the delimiter. The delimiter may be a single
     * character or a string.
     * @param options {@link ListBlobsOptions}
     * @return Emits the successful response.
     * @throws UnsupportedOperationException If snapshot inclusion is requested; the service does not support
     * snapshots in a hierarchical listing.
     */
    private Mono<ContainersListBlobHierarchySegmentResponse> listBlobsHierarchySegment(String marker,
        String delimiter, ListBlobsOptions options) {
        options = options == null ? new ListBlobsOptions() : options;

        if (options.details().snapshots()) {
            throw new UnsupportedOperationException("Including snapshots in a hierarchical listing is not supported.");
        }

        return postProcessResponse(this.azureBlobStorage.containers()
            .listBlobHierarchySegmentWithRestResponseAsync(null, delimiter, options.prefix(), marker,
                options.maxResults(), options.details().toList(), null, null, Context.NONE));
    }

    /**
     * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to
     * 60 seconds, or infinite (-1).
     *
     * @param proposedId A {@code String} in any valid GUID format. May be null.
     * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never
     * expires. A non-infinite lease can be between 15 and 60 seconds.
     * @return A reactive response containing the lease ID.
 */
    public Mono<Response<String>> acquireLease(String proposedId, int duration) {
        return this.acquireLease(proposedId, duration, null);
    }

    /**
     * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to
     * 60 seconds, or infinite (-1).
     *
     * @param proposedID A {@code String} in any valid GUID format. May be null.
     * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never
     * expires. A non-infinite lease can be between 15 and 60 seconds.
     * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data.
     * ETag and LastModifiedTime are used to construct conditions related to when the blob was changed relative
     * to the given request. The request will fail if the specified condition is not satisfied.
     * @return A reactive response containing the lease ID.
     * @throws UnsupportedOperationException If {@link ModifiedAccessConditions} contains ETag
     * (If-Match / If-None-Match) conditions, which this service operation does not support.
     */
    public Mono<Response<String>> acquireLease(String proposedID, int duration,
        ModifiedAccessConditions modifiedAccessConditions) {
        if (!this.validateNoEtag(modifiedAccessConditions)) {
            throw new UnsupportedOperationException(
                "ETag access conditions are not supported for this API.");
        }

        return postProcessResponse(this.azureBlobStorage.containers().acquireLeaseWithRestResponseAsync(
            null, null, duration, proposedID, null, modifiedAccessConditions, Context.NONE))
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId()));
    }

    /**
     * Renews the blob's previously-acquired lease.
     *
     * @param leaseID The leaseId of the active lease on the blob.
     * @return A reactive response containing the renewed lease ID.
     */
    public Mono<Response<String>> renewLease(String leaseID) {
        return this.renewLease(leaseID, null);
    }

    /**
     * Renews the blob's previously-acquired lease.
     *
     * @param leaseID The leaseId of the active lease on the blob.
     * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data.
     * ETag and LastModifiedTime are used to construct conditions related to when the blob was changed relative
     * to the given request. The request will fail if the specified condition is not satisfied.
     * @return A reactive response containing the renewed lease ID.
     * @throws UnsupportedOperationException If {@link ModifiedAccessConditions} contains ETag
     * (If-Match / If-None-Match) conditions, which this service operation does not support.
     */
    public Mono<Response<String>> renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) {
        if (!this.validateNoEtag(modifiedAccessConditions)) {
            throw new UnsupportedOperationException(
                "ETag access conditions are not supported for this API.");
        }

        return postProcessResponse(this.azureBlobStorage.containers().renewLeaseWithRestResponseAsync(null,
            leaseID, null, null, modifiedAccessConditions, Context.NONE))
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId()));
    }

    /**
     * Releases the blob's previously-acquired lease.
     *
     * @param leaseID The leaseId of the active lease on the blob.
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> releaseLease(String leaseID) {
        return this.releaseLease(leaseID, null);
    }

    /**
     * Releases the blob's previously-acquired lease.
     *
     * @param leaseID The leaseId of the active lease on the blob.
     * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data.
     * ETag and LastModifiedTime are used to construct conditions related to when the blob was changed relative
     * to the given request. The request will fail if the specified condition is not satisfied.
     * @return A reactive response signalling completion.
     * @throws UnsupportedOperationException If {@link ModifiedAccessConditions} contains ETag
     * (If-Match / If-None-Match) conditions, which this service operation does not support.
     */
    public Mono<VoidResponse> releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) {
        if (!this.validateNoEtag(modifiedAccessConditions)) {
            throw new UnsupportedOperationException(
                "ETag access conditions are not supported for this API.");
        }

        return postProcessResponse(this.azureBlobStorage.containers().releaseLeaseWithRestResponseAsync(
            null, leaseID, null, null, modifiedAccessConditions, Context.NONE))
            .map(VoidResponse::new);
    }

    /**
     * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
     * constant to break a fixed-duration lease when it expires or an infinite lease immediately.
     *
     * @return A reactive response containing the remaining time in the broken lease.
     */
    public Mono<Response<Duration>> breakLease() {
        return this.breakLease(null, null);
    }

    /**
     * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
     * constant to break a fixed-duration lease when it expires or an infinite lease immediately.
     *
     * @param breakPeriodInSeconds An optional {@code Integer} representing the proposed duration of seconds
     * that the lease should continue before it is broken, between 0 and 60 seconds. This break period is only
     * used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease
     * is used. A new lease will not be available before the break period has expired, but the lease may be
     * held for longer than the break period.
     * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data.
     * ETag and LastModifiedTime are used to construct conditions related to when the blob was changed relative
     * to the given request. The request will fail if the specified condition is not satisfied.
     * @return A reactive response containing the remaining time in the broken lease.
     * @throws UnsupportedOperationException If {@link ModifiedAccessConditions} contains ETag
     * (If-Match / If-None-Match) conditions, which this service operation does not support.
     */
    public Mono<Response<Duration>> breakLease(Integer breakPeriodInSeconds,
        ModifiedAccessConditions modifiedAccessConditions) {
        if (!this.validateNoEtag(modifiedAccessConditions)) {
            throw new UnsupportedOperationException(
                "ETag access conditions are not supported for this API.");
        }

        return postProcessResponse(this.azureBlobStorage.containers().breakLeaseWithRestResponseAsync(null,
            null, breakPeriodInSeconds, null, modifiedAccessConditions, Context.NONE))
            .map(rb -> new SimpleResponse<>(rb, Duration.ofSeconds(rb.deserializedHeaders().leaseTime())));
    }

    /**
     * ChangeLease changes the blob's lease ID.
     *
     * @param leaseId The leaseId of the active lease on the blob.
     * @param proposedID A {@code String} in any valid GUID format.
     * @return A reactive response containing the new lease ID.
     */
    public Mono<Response<String>> changeLease(String leaseId, String proposedID) {
        return this.changeLease(leaseId, proposedID, null);
    }

    /**
     * ChangeLease changes the blob's lease ID. For more information, see the Azure Docs.
     *
     * @param leaseId The leaseId of the active lease on the blob.
     * @param proposedID A {@code String} in any valid GUID format.
     * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data.
     * ETag and LastModifiedTime are used to construct conditions related to when the blob was changed relative
     * to the given request. The request will fail if the specified condition is not satisfied.
     * @return A reactive response containing the new lease ID.
     * @throws UnsupportedOperationException If {@link ModifiedAccessConditions} contains ETag
     * (If-Match / If-None-Match) conditions, which this service operation does not support.
     */
    public Mono<Response<String>> changeLease(String leaseId, String proposedID,
        ModifiedAccessConditions modifiedAccessConditions) {
        if (!this.validateNoEtag(modifiedAccessConditions)) {
            throw new UnsupportedOperationException(
                "ETag access conditions are not supported for this API.");
        }

        return postProcessResponse(this.azureBlobStorage.containers().changeLeaseWithRestResponseAsync(null,
            leaseId, proposedID, null, null, modifiedAccessConditions, Context.NONE))
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId()));
    }

    /**
     * Returns the sku name and account kind for the account. For more information, please see the Azure Docs.
     *
     * @return A reactive response containing the account info.
     */
    public Mono<Response<StorageAccountInfo>> getAccountInfo() {
        return postProcessResponse(
            this.azureBlobStorage.containers().getAccountInfoWithRestResponseAsync(null, Context.NONE))
            .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders())));
    }

    // Returns true when the given access conditions carry no ETag (If-Match / If-None-Match) conditions,
    // i.e. they are safe to pass to service operations that do not support ETag conditions.
    private boolean validateNoEtag(ModifiedAccessConditions modifiedAccessConditions) {
        if (modifiedAccessConditions == null) {
            return true;
        }
        return modifiedAccessConditions.ifMatch() == null && modifiedAccessConditions.ifNoneMatch() == null;
    }

    /**
     * Generates a user delegation SAS with the specified parameters.
     *
     * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS
     * @param accountName The {@code String} account name for the SAS
     * @param permissions The {@code ContainerSASPermissions} permission for the SAS
     * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS
     * @return A string that represents the SAS token
     */
    public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName,
        ContainerSASPermission permissions, OffsetDateTime expiryTime) {
        return this.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, null,
            null, null, null, null, null, null, null, null);
    }

    /**
     * Generates a user delegation SAS token with the specified parameters.
     *
     * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS
     * @param accountName The {@code String} account name for the SAS
     * @param permissions The {@code ContainerSASPermissions} permission for the SAS
     * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS
     * @param startTime An optional {@code OffsetDateTime} start time for the SAS
     * @param version An optional {@code String} version for the SAS
     * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS
     * @param ipRange An optional {@code IPRange} ip address range for the SAS
     * @return A string that represents the SAS token
     */
    public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName,
        ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version,
        SASProtocol sasProtocol, IPRange ipRange) {
        return this.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime,
            startTime, version, sasProtocol, ipRange, null /* cacheControl */, null /* contentDisposition */,
            null /* contentEncoding */, null /* contentLanguage */, null /* contentType */);
    }

    /**
     * Generates a user delegation SAS token with the specified parameters.
     *
     * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS
     * @param accountName The {@code String} account name for the SAS
     * @param permissions The {@code ContainerSASPermissions} permission for the SAS
     * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS
     * @param startTime An optional {@code OffsetDateTime} start time for the SAS
     * @param version An optional {@code String} version for the SAS
     * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS
     * @param ipRange An optional {@code IPRange} ip address range for the SAS
     * @param cacheControl An optional {@code String} cache-control header for the SAS.
     * @param contentDisposition An optional {@code String} content-disposition header for the SAS.
     * @param contentEncoding An optional {@code String} content-encoding header for the SAS.
     * @param contentLanguage An optional {@code String} content-language header for the SAS.
     * @param contentType An optional {@code String} content-type header for the SAS.
     * @return A string that represents the SAS token
     */
    public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName,
        ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version,
        SASProtocol sasProtocol, IPRange ipRange, String cacheControl, String contentDisposition,
        String contentEncoding, String contentLanguage, String contentType) {
        ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol,
            startTime, expiryTime, permissions == null ? null : permissions.toString(), ipRange,
            null /* identifier*/, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentType);

        ServiceSASSignatureValues values = configureServiceSASSignatureValues(serviceSASSignatureValues,
            accountName);

        // Signed with the user delegation key rather than the account's shared key.
        SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(userDelegationKey);

        return sasQueryParameters.encode();
    }

    /**
     * Generates a SAS token with the specified parameters.
     *
     * @param permissions The {@code ContainerSASPermissions} permission for the SAS
     * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS
     * @return A string that represents the SAS token
     */
    public String generateSAS(ContainerSASPermission permissions, OffsetDateTime expiryTime) {
        return this.generateSAS(null /* identifier */, permissions, expiryTime, null /* startTime */,
            null /* version */, null /* sasProtocol */, null /* ipRange */, null /* cacheControl */,
            null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */,
            null /*contentType*/);
    }

    /**
     * Generates a SAS token with the specified parameters.
     *
     * @param identifier The {@code String} name of the access policy on the container this SAS references if
     * any
     * @return A string that represents the SAS token
     */
    public String generateSAS(String identifier) {
        return this.generateSAS(identifier, null /* permissions*/, null /* expiryTime */, null /* startTime */,
            null /* version */, null /* sasProtocol */, null /* ipRange */, null /* cacheControl */,
            null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */,
            null /*contentType*/);
    }

    /**
     * Generates a SAS token with the specified parameters.
     *
     * @param identifier The {@code String} name of the access policy on the container this SAS references if
     * any
     * @param permissions The {@code ContainerSASPermissions} permission for the SAS
     * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS
     * @param startTime An optional {@code OffsetDateTime} start time for the SAS
     * @param version An optional {@code String} version for the SAS
     * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS
     * @param ipRange An optional {@code IPRange} ip address range for the SAS
     * @return A string that represents the SAS token
     */
    public String generateSAS(String identifier, ContainerSASPermission permissions, OffsetDateTime expiryTime,
        OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange) {
        return this.generateSAS(identifier, permissions, expiryTime, startTime, version, sasProtocol, ipRange,
            null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */,
            null /* contentLanguage */, null /*contentType*/);
    }

    /**
     * Generates a SAS token with the specified parameters.
     *
     * @param identifier The {@code String} name of the access policy on the container this SAS references if
     * any
     * @param permissions The {@code ContainerSASPermissions} permission for the SAS
     * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS
     * @param startTime An optional {@code OffsetDateTime} start time for the SAS
     * @param version An optional {@code String} version for the SAS
     * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS
     * @param ipRange An optional {@code IPRange} ip address range for the SAS
     * @param cacheControl An optional {@code String} cache-control header for the SAS.
     * @param contentDisposition An optional {@code String} content-disposition header for the SAS.
     * @param contentEncoding An optional {@code String} content-encoding header for the SAS.
     * @param contentLanguage An optional {@code String} content-language header for the SAS.
     * @param contentType An optional {@code String} content-type header for the SAS.
     * @return A string that represents the SAS token
     */
    public String generateSAS(String identifier, ContainerSASPermission permissions, OffsetDateTime expiryTime,
        OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange, String cacheControl,
        String contentDisposition, String contentEncoding, String contentLanguage, String contentType) {
        ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol,
            startTime, expiryTime, permissions == null ? null : permissions.toString(), ipRange, identifier,
            cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType);

        // Shared-key signing requires a SharedKeyCredential on this client's pipeline.
        SharedKeyCredential sharedKeyCredential =
            Utility.getSharedKeyCredential(this.azureBlobStorage.getHttpPipeline());

        Utility.assertNotNull("sharedKeyCredential", sharedKeyCredential);

        ServiceSASSignatureValues values = configureServiceSASSignatureValues(serviceSASSignatureValues,
            sharedKeyCredential.accountName());

        SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(sharedKeyCredential);

        return sasQueryParameters.encode();
    }

    /**
     * Sets serviceSASSignatureValues parameters dependent on the current blob type.
     */
    private ServiceSASSignatureValues configureServiceSASSignatureValues(
        ServiceSASSignatureValues serviceSASSignatureValues, String accountName) {
        // Set canonical name, snapshot id (none for a container), and resource type.
        serviceSASSignatureValues.canonicalName(this.azureBlobStorage.getUrl(), accountName);
        serviceSASSignatureValues.snapshotId(null);
        serviceSASSignatureValues.resource(Constants.UrlConstants.SAS_CONTAINER_CONSTANT);

        return serviceSASSignatureValues;
    }
}
/**
 * Asynchronous client for operations on an Azure Storage container: obtaining blob-type
 * specific clients ({@link BlockBlobAsyncClient}, {@link PageBlobAsyncClient},
 * {@link AppendBlobAsyncClient}, {@link BlobAsyncClient}), container create/delete,
 * metadata, access policy, blob listing and lease management. Wraps an
 * {@link AzureBlobStorageImpl} and its HTTP pipeline; instances are created by
 * {@link ContainerClientBuilder}.
 */
class ContainerAsyncClient { public static final String ROOT_CONTAINER_NAME = "$root"; public static final String STATIC_WEBSITE_CONTAINER_NAME = "$web"; public static final String LOG_CONTAINER_NAME = "$logs"; private final ClientLogger logger = new ClientLogger(ContainerAsyncClient.class); private final AzureBlobStorageImpl azureBlobStorage; /** * Package-private constructor for use by {@link ContainerClientBuilder}. * * @param azureBlobStorage the API client for blob storage */ ContainerAsyncClient(AzureBlobStorageImpl azureBlobStorage) { this.azureBlobStorage = azureBlobStorage; } /** * Creates a new {@link BlockBlobAsyncClient} object by concatenating the blobName to the end of * ContainerAsyncClient's URL. The new BlockBlobAsyncClient uses the same request policy pipeline as the * ContainerAsyncClient. To change the pipeline, create the BlockBlobAsyncClient and then call its WithPipeline * method passing in the desired pipeline object. Or, call this package's NewBlockBlobAsyncClient instead of calling * this object's NewBlockBlobAsyncClient method. * * @param blobName A {@code String} representing the name of the blob. * @return A new {@link BlockBlobAsyncClient} object which references the blob with the specified name in this * container. */ public BlockBlobAsyncClient getBlockBlobAsyncClient(String blobName) { return getBlockBlobAsyncClient(blobName, null); } /** * Creates a new {@link BlockBlobAsyncClient} object by concatenating the blobName to the end of * ContainerAsyncClient's URL. The new BlockBlobAsyncClient uses the same request policy pipeline as the * ContainerAsyncClient. To change the pipeline, create the BlockBlobAsyncClient and then call its WithPipeline * method passing in the desired pipeline object. Or, call this package's NewBlockBlobAsyncClient instead of calling * this object's NewBlockBlobAsyncClient method. * * @param blobName A {@code String} representing the name of the blob. * @param snapshot the snapshot identifier for the blob. 
* @return A new {@link BlockBlobAsyncClient} object which references the blob with the specified name in this * container. */ public BlockBlobAsyncClient getBlockBlobAsyncClient(String blobName, String snapshot) { return new BlockBlobAsyncClient(new AzureBlobStorageBuilder() .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), snapshot); } /** * Creates creates a new PageBlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's * URL. The new PageBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change the * pipeline, create the PageBlobAsyncClient and then call its WithPipeline method passing in the desired pipeline * object. Or, call this package's NewPageBlobAsyncClient instead of calling this object's NewPageBlobAsyncClient * method. * * @param blobName A {@code String} representing the name of the blob. * @return A new {@link PageBlobAsyncClient} object which references the blob with the specified name in this * container. */ public PageBlobAsyncClient getPageBlobAsyncClient(String blobName) { return getPageBlobAsyncClient(blobName, null); } /** * Creates creates a new PageBlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's * URL. The new PageBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change the * pipeline, create the PageBlobAsyncClient and then call its WithPipeline method passing in the desired pipeline * object. Or, call this package's NewPageBlobAsyncClient instead of calling this object's NewPageBlobAsyncClient * method. * * @param blobName A {@code String} representing the name of the blob. * @param snapshot the snapshot identifier for the blob. * @return A new {@link PageBlobAsyncClient} object which references the blob with the specified name in this * container. 
*/ public PageBlobAsyncClient getPageBlobAsyncClient(String blobName, String snapshot) { return new PageBlobAsyncClient(new AzureBlobStorageBuilder() .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), snapshot); } /** * Creates creates a new AppendBlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's * URL. The new AppendBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change * the pipeline, create the AppendBlobAsyncClient and then call its WithPipeline method passing in the desired * pipeline object. Or, call this package's NewAppendBlobAsyncClient instead of calling this object's * NewAppendBlobAsyncClient method. * * @param blobName A {@code String} representing the name of the blob. * @return A new {@link AppendBlobAsyncClient} object which references the blob with the specified name in this * container. */ public AppendBlobAsyncClient getAppendBlobAsyncClient(String blobName) { return getAppendBlobAsyncClient(blobName, null); } /** * Creates creates a new AppendBlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's * URL. The new AppendBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change * the pipeline, create the AppendBlobAsyncClient and then call its WithPipeline method passing in the desired * pipeline object. Or, call this package's NewAppendBlobAsyncClient instead of calling this object's * NewAppendBlobAsyncClient method. * * @param blobName A {@code String} representing the name of the blob. * @param snapshot the snapshot identifier for the blob. * @return A new {@link AppendBlobAsyncClient} object which references the blob with the specified name in this * container. 
*/ public AppendBlobAsyncClient getAppendBlobAsyncClient(String blobName, String snapshot) { return new AppendBlobAsyncClient(new AzureBlobStorageBuilder() .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), snapshot); } /** * Creates a new BlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's URL. The new * BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change the pipeline, create * the BlobAsyncClient and then call its WithPipeline method passing in the desired pipeline object. Or, call this * package's getBlobAsyncClient instead of calling this object's getBlobAsyncClient method. * * @param blobName A {@code String} representing the name of the blob. * @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this container. */ public BlobAsyncClient getBlobAsyncClient(String blobName) { return getBlobAsyncClient(blobName, null); } /** * Creates a new BlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's URL. The new * BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change the pipeline, create * the BlobAsyncClient and then call its WithPipeline method passing in the desired pipeline object. Or, call this * package's getBlobAsyncClient instead of calling this object's getBlobAsyncClient method. * * @param blobName A {@code String} representing the name of the blob. * @param snapshot the snapshot identifier for the blob. * @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this container. 
*/
public BlobAsyncClient getBlobAsyncClient(String blobName, String snapshot) {
    return new BlobAsyncClient(new AzureBlobStorageBuilder()
        .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString())
        .pipeline(azureBlobStorage.getHttpPipeline())
        .build(), snapshot);
}

/**
 * Initializes a {@link BlobServiceAsyncClient} object pointing to the storage account this container is in.
 *
 * @return A {@link BlobServiceAsyncClient} object pointing to the specified storage account
 */
public BlobServiceAsyncClient getBlobServiceAsyncClient() {
    return new BlobServiceAsyncClient(new AzureBlobStorageBuilder()
        .url(Utility.stripLastPathSegment(getContainerUrl()).toString())
        .pipeline(azureBlobStorage.getHttpPipeline())
        .build());
}

/**
 * Gets the URL of the container represented by this client.
 *
 * @return the URL.
 * @throws RuntimeException If the container has a malformed URL.
 */
public URL getContainerUrl() {
    try {
        return new URL(azureBlobStorage.getUrl());
    } catch (MalformedURLException e) {
        // BUGFIX: the format string previously concatenated the class name onto the pattern
        // ("Invalid URL on %s: %s" + getClass().getSimpleName()) while supplying only one
        // argument, so String.format would throw MissingFormatArgumentException instead of
        // producing the intended diagnostic. Pass both values as format arguments instead.
        throw logger.logExceptionAsError(new RuntimeException(
            String.format("Invalid URL on %s: %s", getClass().getSimpleName(), azureBlobStorage.getUrl()), e));
    }
}

/**
 * Gets if the container this client represents exists in the cloud.
 *
 * @return true if the container exists, false if it doesn't
 */
public Mono<Boolean> exists() {
    return existsWithResponse().flatMap(FluxUtil::toMono);
}

/**
 * Gets if the container this client represents exists in the cloud.
 *
 * @return true if the container exists, false if it doesn't
 */
public Mono<Response<Boolean>> existsWithResponse() {
    // delegate to the Context-accepting overload with the ambient subscriber context
    return withContext(context -> existsWithResponse(context));
}

/**
 * Gets if the container this client represents exists in the cloud.
* * @return true if the container exists, false if it doesn't */ Mono<Response<Boolean>> existsWithResponse(Context context) { return this.getPropertiesWithResponse(null, context) .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true)) /* a 404 StorageException from the service is mapped to a successful response carrying "false" (container does not exist); all other errors propagate */ .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> { HttpResponse response = ((StorageException) t).response(); return Mono.just(new SimpleResponse<>(response.request(), response.statusCode(), response.headers(), false)); }); } /** * Creates a new container within a storage account. If a container with the same name already exists, the operation * fails. For more information, see the * <a href="https: * * @return A reactive response signalling completion. */ public Mono<Void> create() { return createWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * Creates a new container within a storage account. If a container with the same name already exists, the operation * fails. For more information, see the * <a href="https: * * @param metadata {@link Metadata} * @param accessType Specifies how the data in this container is available to the public. See the * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. * @return A reactive response signalling completion. */ public Mono<VoidResponse> createWithResponse(Metadata metadata, PublicAccessType accessType) { return withContext(context -> createWithResponse(metadata, accessType, context)); } Mono<VoidResponse> createWithResponse(Metadata metadata, PublicAccessType accessType, Context context) { metadata = metadata == null ? new Metadata() : metadata; return postProcessResponse(this.azureBlobStorage.containers().createWithRestResponseAsync( null, null, metadata, accessType, null, null, null, context)).map(VoidResponse::new); } /** * Marks the specified container for deletion. The container and any blobs contained within it are later deleted * during garbage collection. 
For more information, see the * <a href="https: * * @return A reactive response signalling completion. */ public Mono<Void> delete() { return deleteWithResponse(null).flatMap(FluxUtil::toMono); } /** * Marks the specified container for deletion. The container and any blobs contained within it are later deleted * during garbage collection. For more information, see the * <a href="https: * * @param accessConditions {@link ContainerAccessConditions} * @return A reactive response signalling completion. * @throws UnsupportedOperationException If {@link ContainerAccessConditions * {@link ModifiedAccessConditions */ public Mono<VoidResponse> deleteWithResponse(ContainerAccessConditions accessConditions) { return withContext(context -> deleteWithResponse(accessConditions, context)); } Mono<VoidResponse> deleteWithResponse(ContainerAccessConditions accessConditions, Context context) { accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions; /* container delete does not support ETag-based conditions; reject before issuing the request */ if (!validateNoEtag(accessConditions.modifiedAccessConditions())) { throw logger.logExceptionAsError(new UnsupportedOperationException("ETag access conditions are not supported for this API.")); } return postProcessResponse(this.azureBlobStorage.containers().deleteWithRestResponseAsync(null, null, null, accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context)) .map(VoidResponse::new); } /** * Returns the container's metadata and system properties. For more information, see the * <a href="https: * * @return A {@link Mono} containing a {@link Response} whose {@link Response * container properties. */ public Mono<ContainerProperties> getProperties() { return getPropertiesWithResponse(null).flatMap(FluxUtil::toMono); } /** * Returns the container's metadata and system properties. 
For more information, see the * <a href="https: * * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does * not match the active lease on the blob. * @return A reactive response containing the container properties. */ public Mono<Response<ContainerProperties>> getPropertiesWithResponse(LeaseAccessConditions leaseAccessConditions) { return withContext(context -> getPropertiesWithResponse(leaseAccessConditions, context)); } Mono<Response<ContainerProperties>> getPropertiesWithResponse(LeaseAccessConditions leaseAccessConditions, Context context) { return postProcessResponse(this.azureBlobStorage.containers().getPropertiesWithRestResponseAsync(null, null, null, leaseAccessConditions, context)) .map(rb -> new SimpleResponse<>(rb, new ContainerProperties(rb.deserializedHeaders()))); } /** * Sets the container's metadata. For more information, see the * <a href="https: * * @param metadata {@link Metadata} * @return A {@link Mono} containing a {@link Response} whose {@link Response * completion. */ public Mono<Void> setMetadata(Metadata metadata) { return setMetadataWithResponse(metadata, null).flatMap(FluxUtil::toMono); } /** * Sets the container's metadata. For more information, see the * <a href="https: * * @param metadata {@link Metadata} * @param accessConditions {@link ContainerAccessConditions} * @return A reactive response signalling completion. * @throws UnsupportedOperationException If {@link ContainerAccessConditions * anything set other than {@link ModifiedAccessConditions */ public Mono<VoidResponse> setMetadataWithResponse(Metadata metadata, ContainerAccessConditions accessConditions) { return withContext(context -> setMetadataWithResponse(metadata, accessConditions, context)); } Mono<VoidResponse> setMetadataWithResponse(Metadata metadata, ContainerAccessConditions accessConditions, Context context) { metadata = metadata == null ? 
new Metadata() : metadata; accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions; /* set-metadata only supports the If-Modified-Since condition; ETag and If-Unmodified-Since are rejected */ if (!validateNoEtag(accessConditions.modifiedAccessConditions()) || accessConditions.modifiedAccessConditions().ifUnmodifiedSince() != null) { throw logger.logExceptionAsError(new UnsupportedOperationException( "If-Modified-Since is the only HTTP access condition supported for this API")); } return postProcessResponse(this.azureBlobStorage.containers().setMetadataWithRestResponseAsync(null, null, metadata, null, accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context)).map(VoidResponse::new); } /** * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly. * For more information, see the * <a href="https: * * @return A reactive response containing the container access policy. */ public Mono<ContainerAccessPolicies> getAccessPolicy() { return getAccessPolicyWithResponse(null).flatMap(FluxUtil::toMono); } /** * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly. * For more information, see the * <a href="https: * * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does * not match the active lease on the blob. * @return A reactive response containing the container access policy. 
*/ public Mono<Response<ContainerAccessPolicies>> getAccessPolicyWithResponse(LeaseAccessConditions leaseAccessConditions) { return withContext(context -> getAccessPolicyWithResponse(leaseAccessConditions, context)); } Mono<Response<ContainerAccessPolicies>> getAccessPolicyWithResponse(LeaseAccessConditions leaseAccessConditions, Context context) { return postProcessResponse(this.azureBlobStorage.containers().getAccessPolicyWithRestResponseAsync(null, null, null, leaseAccessConditions, context).map(response -> new SimpleResponse<>(response, new ContainerAccessPolicies(response.deserializedHeaders().blobPublicAccess(), response.value())))); } /** * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly. * Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to * ensure the time formatting is compatible with the service. For more information, see the * <a href="https: * * @param accessType Specifies how the data in this container is available to the public. See the * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. * @param identifiers A list of {@link SignedIdentifier} objects that specify the permissions for the container. * Please see * <a href="https: * for more information. Passing null will clear all access policies. * @return A reactive response signalling completion. */ public Mono<Void> setAccessPolicy(PublicAccessType accessType, List<SignedIdentifier> identifiers) { return setAccessPolicyWithResponse(accessType, identifiers, null).flatMap(FluxUtil::toMono); } /** * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly. * Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to * ensure the time formatting is compatible with the service. 
For more information, see the * <a href="https: * * @param accessType Specifies how the data in this container is available to the public. See the * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. * @param identifiers A list of {@link SignedIdentifier} objects that specify the permissions for the container. * Please see * <a href="https: * for more information. Passing null will clear all access policies. * @param accessConditions {@link ContainerAccessConditions} * @return A reactive response signalling completion. * @throws UnsupportedOperationException If {@link ContainerAccessConditions * {@link ModifiedAccessConditions */ public Mono<VoidResponse> setAccessPolicyWithResponse(PublicAccessType accessType, List<SignedIdentifier> identifiers, ContainerAccessConditions accessConditions) { return withContext(context -> setAccessPolicyWithResponse(accessType, identifiers, accessConditions, context)); } Mono<VoidResponse> setAccessPolicyWithResponse(PublicAccessType accessType, List<SignedIdentifier> identifiers, ContainerAccessConditions accessConditions, Context context) { accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions; if (!validateNoEtag(accessConditions.modifiedAccessConditions())) { throw logger.logExceptionAsError(new UnsupportedOperationException("ETag access conditions are not supported for this API.")); } /* We truncate to seconds because the service only supports nanoseconds or seconds, but doing an OffsetDateTime.now will only give back milliseconds (more precise fields are zeroed and not serialized). This allows for proper serialization with no real detriment to users as sub-second precision on active time for signed identifiers is not really necessary. 
*/ /* NOTE(review): the truncation below mutates the caller-supplied SignedIdentifier objects in place — confirm this side effect is intended */ if (identifiers != null) { for (SignedIdentifier identifier : identifiers) { if (identifier.accessPolicy() != null && identifier.accessPolicy().start() != null) { identifier.accessPolicy().start( identifier.accessPolicy().start().truncatedTo(ChronoUnit.SECONDS)); } if (identifier.accessPolicy() != null && identifier.accessPolicy().expiry() != null) { identifier.accessPolicy().expiry( identifier.accessPolicy().expiry().truncatedTo(ChronoUnit.SECONDS)); } } } return postProcessResponse(this.azureBlobStorage.containers().setAccessPolicyWithRestResponseAsync(null, identifiers, null, accessType, null, accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context)).map(VoidResponse::new); } /** * Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories are * flattened and only actual blobs and no directories are returned. * * <p> * Blob names are returned in lexicographic order. For more information, see the * <a href="https: * * <p> * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the * root level 'bar', will return * * <ul> * <li>foo/foo1 * <li>foo/foo2 * <li>bar * </ul> * * @return A reactive response emitting the flattened blobs. */ public PagedFlux<BlobItem> listBlobsFlat() { return this.listBlobsFlat(new ListBlobsOptions()); } /** * Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories are * flattened and only actual blobs and no directories are returned. * * <p> * Blob names are returned in lexicographic order. For more information, see the * <a href="https: * * <p> * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the * root level 'bar', will return * * <ul> * <li>foo/foo1 * <li>foo/foo2 * <li>bar * </ul> * * @param options {@link ListBlobsOptions} * @return A reactive response emitting the listed blobs, flattened. 
*/ /* * Implementation for this paged listing operation, supporting an optional timeout provided by the synchronous * ContainerClient. Applies the given timeout to each Mono<ContainersListBlobFlatSegmentResponse> backing the * PagedFlux. * * @param options {@link ListBlobsOptions}. * @param timeout An optional timeout to be applied to the network asynchronous operations. * @return A reactive response emitting the listed blobs, flattened. */ PagedFlux<BlobItem> listBlobsFlatWithOptionalTimeout(ListBlobsOptions options, Duration timeout) { /* each page fetch reuses this function; the marker from the previous response drives the next segment request */ Function<String, Mono<PagedResponse<BlobItem>>> func = marker -> listBlobsFlatSegment(marker, options, timeout) .map(response -> { List<BlobItem> value = response.value().segment() == null ? new ArrayList<>(0) : response.value().segment().blobItems(); return new PagedResponseBase<>( response.request(), response.statusCode(), response.headers(), value, response.value().nextMarker(), response.deserializedHeaders()); }); return new PagedFlux<>( () -> func.apply(null), marker -> func.apply(marker)); } /* * Returns a single segment of blobs starting from the specified Marker. Use an empty * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned * Marker) to get the next segment. For more information, see the * <a href="https: * * @param marker * Identifies the portion of the list to be returned with the next list operation. * This value is returned in the response of a previous list operation as the * ListBlobsFlatSegmentResponse.body().nextMarker(). Set to null to list the first segment. * @param options * {@link ListBlobsOptions} * * @return Emits the successful response. 
* * @apiNote * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat "Sample code for ContainerAsyncClient.listBlobsFlatSegment")] \n * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat_helper "helper code for ContainerAsyncClient.listBlobsFlatSegment")] \n * For more samples, please see the [Samples file](%https: */ private Mono<ContainersListBlobFlatSegmentResponse> listBlobsFlatSegment(String marker, ListBlobsOptions options, Duration timeout) { options = options == null ? new ListBlobsOptions() : options; return postProcessResponse(Utility.applyOptionalTimeout( this.azureBlobStorage.containers().listBlobFlatSegmentWithRestResponseAsync(null, options.prefix(), marker, options.maxResults(), options.details().toList(), null, null, Context.NONE), timeout)); } /** * Returns a reactive Publisher emitting all the blobs and directories (prefixes) under the given directory * (prefix). Directories will have {@link BlobItem * * <p> * Blob names are returned in lexicographic order. For more information, see the * <a href="https: * * <p> * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the * root level 'bar', will return the following results when prefix=null: * * <ul> * <li>foo/ (isPrefix = true) * <li>bar (isPrefix = false) * </ul> * <p> * will return the following results when prefix="foo/": * * <ul> * <li>foo/foo1 (isPrefix = false) * <li>foo/foo2 (isPrefix = false) * </ul> * * @param directory The directory to list blobs underneath * @return A reactive response emitting the prefixes and blobs. */ public PagedFlux<BlobItem> listBlobsHierarchy(String directory) { return this.listBlobsHierarchy("/", new ListBlobsOptions().prefix(directory)); } /** * Returns a reactive Publisher emitting all the blobs and prefixes (directories) under the given prefix * (directory). 
Directories will have {@link BlobItem * * <p> * Blob names are returned in lexicographic order. For more information, see the * <a href="https: * * <p> * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the * root level 'bar', will return the following results when prefix=null: * * <ul> * <li>foo/ (isPrefix = true) * <li>bar (isPrefix = false) * </ul> * <p> * will return the following results when prefix="foo/": * * <ul> * <li>foo/foo1 (isPrefix = false) * <li>foo/foo2 (isPrefix = false) * </ul> * * @param delimiter The delimiter for blob hierarchy, "/" for hierarchy based on directories * @param options {@link ListBlobsOptions} * @return A reactive response emitting the prefixes and blobs. */ public PagedFlux<BlobItem> listBlobsHierarchy(String delimiter, ListBlobsOptions options) { return listBlobsHierarchyWithOptionalTimeout(delimiter, options, null); } /* * Implementation for this paged listing operation, supporting an optional timeout provided by the synchronous * ContainerClient. Applies the given timeout to each Mono<ContainersListBlobHierarchySegmentResponse> backing the * PagedFlux. * * @param delimiter The delimiter for blob hierarchy, "/" for hierarchy based on directories * @param options {@link ListBlobsOptions} * @param timeout An optional timeout to be applied to the network asynchronous operations. * @return A reactive response emitting the listed blobs, flattened. */ PagedFlux<BlobItem> listBlobsHierarchyWithOptionalTimeout(String delimiter, ListBlobsOptions options, Duration timeout) { Function<String, Mono<PagedResponse<BlobItem>>> func = marker -> listBlobsHierarchySegment(marker, delimiter, options, timeout) .map(response -> { List<BlobItem> value = response.value().segment() == null ? 
new ArrayList<>(0) : /* NOTE(review): assumes blobItems() and blobPrefixes() are non-null whenever segment() is non-null — verify against the generated model; prefixes are surfaced as BlobItems flagged isPrefix(true) */ Stream.concat( response.value().segment().blobItems().stream(), response.value().segment().blobPrefixes().stream() .map(blobPrefix -> new BlobItem().name(blobPrefix.name()).isPrefix(true)) ).collect(Collectors.toList()); return new PagedResponseBase<>( response.request(), response.statusCode(), response.headers(), value, response.value().nextMarker(), response.deserializedHeaders()); }); return new PagedFlux<>( () -> func.apply(null), marker -> func.apply(marker)); } /* * Returns a single segment of blobs and blob prefixes starting from the specified Marker. Use an empty * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned * Marker) to get the next segment. For more information, see the * <a href="https: * * @param marker * Identifies the portion of the list to be returned with the next list operation. * This value is returned in the response of a previous list operation as the * ListBlobsHierarchySegmentResponse.body().nextMarker(). Set to null to list the first segment. * @param delimiter * The operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs * whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may * be a single character or a string. * @param options * {@link ListBlobsOptions} * * @return Emits the successful response. * @throws UnsupportedOperationException If {@link ListBlobsOptions * set. 
* * @apiNote * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy "Sample code for ContainerAsyncClient.listBlobsHierarchySegment")] \n * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy_helper "helper code for ContainerAsyncClient.listBlobsHierarchySegment")] \n * For more samples, please see the [Samples file](%https: */ private Mono<ContainersListBlobHierarchySegmentResponse> listBlobsHierarchySegment(String marker, String delimiter, ListBlobsOptions options, Duration timeout) { options = options == null ? new ListBlobsOptions() : options; /* the service rejects snapshot inclusion in hierarchical listings; fail fast client-side */ if (options.details().snapshots()) { throw logger.logExceptionAsError(new UnsupportedOperationException("Including snapshots in a hierarchical listing is not supported.")); } return postProcessResponse(Utility.applyOptionalTimeout( this.azureBlobStorage.containers().listBlobHierarchySegmentWithRestResponseAsync(null, delimiter, options.prefix(), marker, options.maxResults(), options.details().toList(), null, null, Context.NONE), timeout)); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedId A {@code String} in any valid GUID format. May be null. * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A * non-infinite lease can be between 15 and 60 seconds. * @return A reactive response containing the lease ID. */ public Mono<String> acquireLease(String proposedId, int duration) { return acquireLeaseWithResponse(proposedId, duration, null).flatMap(FluxUtil::toMono); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedID A {@code String} in any valid GUID format. May be null. 
* @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A * non-infinite lease can be between 15 and 60 seconds. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the lease ID. * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions * ModifiedAccessConditions */ public Mono<Response<String>> acquireLeaseWithResponse(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions) { return withContext(context -> acquireLeaseWithResponse(proposedID, duration, modifiedAccessConditions, context)); } Mono<Response<String>> acquireLeaseWithResponse(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions, Context context) { /* container lease operations do not support ETag-based conditions; reject before issuing the request */ if (!this.validateNoEtag(modifiedAccessConditions)) { throw logger.logExceptionAsError(new UnsupportedOperationException( "ETag access conditions are not supported for this API.")); } return postProcessResponse(this.azureBlobStorage.containers().acquireLeaseWithRestResponseAsync(null, null, duration, proposedID, null, modifiedAccessConditions, context)).map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Renews the blob's previously-acquired lease. * * @param leaseID The leaseId of the active lease on the blob. * @return A reactive response containing the renewed lease ID. */ public Mono<String> renewLease(String leaseID) { return renewLeaseWithResponse(leaseID, null).flatMap(FluxUtil::toMono); } /** * Renews the blob's previously-acquired lease. * * @param leaseID The leaseId of the active lease on the blob. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. 
ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the renewed lease ID. * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions * ModifiedAccessConditions */ public Mono<Response<String>> renewLeaseWithResponse(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return withContext(context -> renewLeaseWithResponse(leaseID, modifiedAccessConditions, context)); } Mono<Response<String>> renewLeaseWithResponse(String leaseID, ModifiedAccessConditions modifiedAccessConditions, Context context) { if (!this.validateNoEtag(modifiedAccessConditions)) { throw logger.logExceptionAsError(new UnsupportedOperationException( "ETag access conditions are not supported for this API.")); } return postProcessResponse(this.azureBlobStorage.containers().renewLeaseWithRestResponseAsync(null, leaseID, null, null, modifiedAccessConditions, context)).map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Releases the blob's previously-acquired lease. * * @param leaseID The leaseId of the active lease on the blob. * @return A reactive response signalling completion. */ public Mono<Void> releaseLease(String leaseID) { return releaseLeaseWithResponse(leaseID, null).flatMap(FluxUtil::toMono); } /** * Releases the blob's previously-acquired lease. * * @param leaseID The leaseId of the active lease on the blob. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response signalling completion. 
* @throws UnsupportedOperationException If either {@link ModifiedAccessConditions * ModifiedAccessConditions */ public Mono<VoidResponse> releaseLeaseWithResponse(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return withContext(context -> releaseLeaseWithResponse(leaseID, modifiedAccessConditions, context)); } Mono<VoidResponse> releaseLeaseWithResponse(String leaseID, ModifiedAccessConditions modifiedAccessConditions, Context context) { if (!this.validateNoEtag(modifiedAccessConditions)) { throw logger.logExceptionAsError(new UnsupportedOperationException( "ETag access conditions are not supported for this API.")); } return postProcessResponse(this.azureBlobStorage.containers().releaseLeaseWithRestResponseAsync( null, leaseID, null, null, modifiedAccessConditions, context)) .map(VoidResponse::new); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @return A reactive response containing the remaining time in the broken lease. */ public Mono<Duration> breakLease() { return breakLeaseWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @param breakPeriodInSeconds An optional {@code Integer} representing the proposed duration of seconds that the * lease should continue before it is broken, between 0 and 60 seconds. This break period is only used if it is * shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease * will not be available before the break period has expired, but the lease may be held for longer than the break * period. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. 
ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the remaining time in the broken lease. * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions * ModifiedAccessConditions */ public Mono<Response<Duration>> breakLeaseWithResponse(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions) { return withContext(context -> breakLeaseWithResponse(breakPeriodInSeconds, modifiedAccessConditions, context)); } Mono<Response<Duration>> breakLeaseWithResponse(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions, Context context) { if (!this.validateNoEtag(modifiedAccessConditions)) { throw logger.logExceptionAsError(new UnsupportedOperationException( "ETag access conditions are not supported for this API.")); } return postProcessResponse(this.azureBlobStorage.containers().breakLeaseWithRestResponseAsync(null, null, breakPeriodInSeconds, null, modifiedAccessConditions, context)) .map(rb -> new SimpleResponse<>(rb, Duration.ofSeconds(rb.deserializedHeaders().leaseTime()))); } /** * ChangeLease changes the blob's lease ID. * * @param leaseId The leaseId of the active lease on the blob. * @param proposedID A {@code String} in any valid GUID format. * @return A reactive response containing the new lease ID. */ public Mono<String> changeLease(String leaseId, String proposedID) { return changeLeaseWithResponse(leaseId, proposedID, null).flatMap(FluxUtil::toMono); } /** * ChangeLease changes the blob's lease ID. For more information, see the <a href="https: * Docs</a>. * * @param leaseId The leaseId of the active lease on the blob. * @param proposedID A {@code String} in any valid GUID format. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. 
ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the new lease ID. * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions * ModifiedAccessConditions */ public Mono<Response<String>> changeLeaseWithResponse(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions) { return withContext(context -> changeLeaseWithResponse(leaseId, proposedID, modifiedAccessConditions, context)); } Mono<Response<String>> changeLeaseWithResponse(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions, Context context) { if (!this.validateNoEtag(modifiedAccessConditions)) { throw logger.logExceptionAsError(new UnsupportedOperationException( "ETag access conditions are not supported for this API.")); } return postProcessResponse(this.azureBlobStorage.containers().changeLeaseWithRestResponseAsync(null, leaseId, proposedID, null, null, modifiedAccessConditions, context)) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Returns the sku name and account kind for the account. For more information, please see the * <a href="https: * * @return A reactive response containing the account info. */ public Mono<StorageAccountInfo> getAccountInfo() { return getAccountInfoWithResponse().flatMap(FluxUtil::toMono); } /** * Returns the sku name and account kind for the account. For more information, please see the * <a href="https: * * @return A reactive response containing the account info. 
*/ public Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse() { return withContext(context -> getAccountInfoWithResponse(context)); } Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse(Context context) { return postProcessResponse( this.azureBlobStorage.containers().getAccountInfoWithRestResponseAsync(null, context)) .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders()))); } private boolean validateNoEtag(ModifiedAccessConditions modifiedAccessConditions) { if (modifiedAccessConditions == null) { return true; } return modifiedAccessConditions.ifMatch() == null && modifiedAccessConditions.ifNoneMatch() == null; } /** * Generates a user delegation SAS with the specified parameters * * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS * @param accountName The {@code String} account name for the SAS * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @return A string that represents the SAS token */ public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, ContainerSASPermission permissions, OffsetDateTime expiryTime) { return this.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, null, null, null, null, null, null, null, null, null); } /** * Generates a user delegation SAS token with the specified parameters * * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS * @param accountName The {@code String} account name for the SAS * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol 
for the SAS * @param ipRange An optional {@code IPRange} ip address range for the SAS * @return A string that represents the SAS token */ public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange) { return this.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, startTime, version, sasProtocol, ipRange, null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /* contentType */); } /** * Generates a user delegation SAS token with the specified parameters * * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS * @param accountName The {@code String} account name for the SAS * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS * @param ipRange An optional {@code IPRange} ip address range for the SAS * @param cacheControl An optional {@code String} cache-control header for the SAS. * @param contentDisposition An optional {@code String} content-disposition header for the SAS. * @param contentEncoding An optional {@code String} content-encoding header for the SAS. * @param contentLanguage An optional {@code String} content-language header for the SAS. * @param contentType An optional {@code String} content-type header for the SAS. 
* @return A string that represents the SAS token */ public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange, String cacheControl, String contentDisposition, String contentEncoding, String contentLanguage, String contentType) { ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol, startTime, expiryTime, permissions == null ? null : permissions.toString(), ipRange, null /* identifier*/, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); ServiceSASSignatureValues values = configureServiceSASSignatureValues(serviceSASSignatureValues, accountName); SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(userDelegationKey); return sasQueryParameters.encode(); } /** * Generates a SAS token with the specified parameters * * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @return A string that represents the SAS token */ public String generateSAS(ContainerSASPermission permissions, OffsetDateTime expiryTime) { return this.generateSAS(null, permissions, /* identifier */ expiryTime, null /* startTime */, null /* version */, null /* sasProtocol */, null /* ipRange */, null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /*contentType*/); } /** * Generates a SAS token with the specified parameters * * @param identifier The {@code String} name of the access policy on the container this SAS references if any * @return A string that represents the SAS token */ public String generateSAS(String identifier) { return this.generateSAS(identifier, null /* permissions*/, null /* expiryTime */, null /* startTime */, null /* version */, null 
/* sasProtocol */, null /* ipRange */, null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /*contentType*/); } /** * Generates a SAS token with the specified parameters * * @param identifier The {@code String} name of the access policy on the container this SAS references if any * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS * @param ipRange An optional {@code IPRange} ip address range for the SAS * @return A string that represents the SAS token */ public String generateSAS(String identifier, ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange) { return this.generateSAS(identifier, permissions, expiryTime, startTime, version, sasProtocol, ipRange, null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /*contentType*/); } /** * Generates a SAS token with the specified parameters * * @param identifier The {@code String} name of the access policy on the container this SAS references if any * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS * @param ipRange An optional {@code IPRange} ip address range for the SAS * @param cacheControl An optional {@code String} cache-control header for the SAS. 
* @param contentDisposition An optional {@code String} content-disposition header for the SAS. * @param contentEncoding An optional {@code String} content-encoding header for the SAS. * @param contentLanguage An optional {@code String} content-language header for the SAS. * @param contentType An optional {@code String} content-type header for the SAS. * @return A string that represents the SAS token */ public String generateSAS(String identifier, ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange, String cacheControl, String contentDisposition, String contentEncoding, String contentLanguage, String contentType) { ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol, startTime, expiryTime, permissions == null ? null : permissions.toString(), ipRange, identifier, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); SharedKeyCredential sharedKeyCredential = Utility.getSharedKeyCredential(this.azureBlobStorage.getHttpPipeline()); Utility.assertNotNull("sharedKeyCredential", sharedKeyCredential); ServiceSASSignatureValues values = configureServiceSASSignatureValues(serviceSASSignatureValues, sharedKeyCredential.accountName()); SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(sharedKeyCredential); return sasQueryParameters.encode(); } /** * Sets serviceSASSignatureValues parameters dependent on the current blob type */ private ServiceSASSignatureValues configureServiceSASSignatureValues(ServiceSASSignatureValues serviceSASSignatureValues, String accountName) { serviceSASSignatureValues.canonicalName(this.azureBlobStorage.getUrl(), accountName); serviceSASSignatureValues.snapshotId(null); serviceSASSignatureValues.resource(Constants.UrlConstants.SAS_CONTAINER_CONSTANT); return serviceSASSignatureValues; } }
Each of these functions has its own `Function<String, Mono<PagedResponse<T>>>` to reuse the mapping into PagedResponseBase.
/**
 * Returns a reactive Publisher emitting all the containers in this account lazily as needed, paging through
 * segments via {@code listContainersSegment}.
 *
 * @param options A {@code ListContainersOptions} which specifies what data should be returned by the service.
 * @return A reactive response emitting the list of containers.
 */
// NOTE(review): the first-page supplier and next-page function below are identical except for the marker;
// they could be deduplicated through a shared Function<String, Mono<PagedResponse<ContainerItem>>> local
// once that type's import is confirmed to be in scope in this file.
public PagedFlux<ContainerItem> listContainers(ListContainersOptions options) {
    return new PagedFlux<>(
        () -> listContainersSegment(null, options)
            .map(response -> new PagedResponseBase<>(
                response.request(),
                response.statusCode(),
                response.headers(),
                response.value().containerItems(),
                response.value().nextMarker(),
                response.deserializedHeaders())),
        (marker) -> listContainersSegment(marker, options)
            .map(response -> new PagedResponseBase<>(
                response.request(),
                response.statusCode(),
                response.headers(),
                response.value().containerItems(),
                response.value().nextMarker(),
                response.deserializedHeaders())));
}
.map(response -> new PagedResponseBase<>(
/**
 * Returns a reactive Publisher emitting all the containers in this account lazily as needed.
 *
 * @param options A {@code ListContainersOptions} which specifies what data should be returned by the service.
 * @return A reactive response emitting the list of containers.
 */
public PagedFlux<ContainerItem> listContainers(ListContainersOptions options) {
    // Delegates with a null timeout — presumably meaning no per-call timeout is applied;
    // confirm against listContainersWithOptionalTimeout (defined elsewhere in this file).
    return listContainersWithOptionalTimeout(options, null);
}
/**
 * Async client for the Blob service account endpoint: container lifecycle, account-level properties,
 * statistics, user delegation keys, and account SAS generation.
 */
class BlobServiceAsyncClient {
    private final AzureBlobStorageImpl azureBlobStorage;

    /**
     * Package-private constructor for use by {@link BlobServiceClientBuilder}.
     *
     * @param azureBlobStorageBuilder the API client builder for blob storage API
     */
    BlobServiceAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder) {
        this.azureBlobStorage = azureBlobStorageBuilder.build();
    }

    /**
     * Initializes a {@link ContainerAsyncClient} object pointing to the specified container. This method does not
     * create a container. It simply constructs the URL to the container and offers access to methods relevant to
     * containers.
     *
     * @param containerName The name of the container to point to.
     * @return A {@link ContainerAsyncClient} object pointing to the specified container
     */
    public ContainerAsyncClient getContainerAsyncClient(String containerName) {
        return new ContainerAsyncClient(new AzureBlobStorageBuilder()
            .url(Utility.appendToURLPath(getAccountUrl(), containerName).toString())
            .pipeline(azureBlobStorage.getHttpPipeline()));
    }

    /**
     * Creates a new container within a storage account. If a container with the same name already exists, the
     * operation fails. For more information, see the <a href="https:
     *
     * @param containerName Name of the container to create
     * @return A response containing a {@link ContainerAsyncClient} used to interact with the container created.
     */
    public Mono<Response<ContainerAsyncClient>> createContainer(String containerName) {
        return createContainer(containerName, null, null);
    }

    /**
     * Creates a new container within a storage account. If a container with the same name already exists, the
     * operation fails. For more information, see the <a href="https:
     *
     * @param containerName Name of the container to create
     * @param metadata {@link Metadata}
     * @param accessType Specifies how the data in this container is available to the public. See the
     * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
     * @return A response containing a {@link ContainerAsyncClient} used to interact with the container created.
     */
    public Mono<Response<ContainerAsyncClient>> createContainer(String containerName, Metadata metadata,
        PublicAccessType accessType) {
        ContainerAsyncClient containerAsyncClient = getContainerAsyncClient(containerName);
        return containerAsyncClient.create(metadata, accessType)
            .map(response -> new SimpleResponse<>(response, containerAsyncClient));
    }

    /**
     * Deletes the specified container in the storage account. If the container doesn't exist the operation fails.
     * For more information see the <a href="https:
     *
     * @param containerName Name of the container to delete
     * @return A response containing status code and HTTP headers
     */
    public Mono<VoidResponse> deleteContainer(String containerName) {
        return getContainerAsyncClient(containerName).delete();
    }

    /**
     * Gets the URL of the storage account represented by this client.
     *
     * @return the URL.
     * @throws RuntimeException If the account URL is malformed.
     */
    public URL getAccountUrl() {
        try {
            return new URL(azureBlobStorage.getUrl());
        } catch (MalformedURLException e) {
            // FIX: the format string previously concatenated the class name into the template
            // ("Invalid URL on %s: %s" + getClass().getSimpleName()) while supplying only one
            // argument, which made String.format throw MissingFormatArgumentException instead of
            // producing the intended message. Supply both arguments to the two %s specifiers.
            throw new RuntimeException(String.format("Invalid URL on %s: %s",
                getClass().getSimpleName(), azureBlobStorage.getUrl()), e);
        }
    }

    /**
     * Returns a reactive Publisher emitting all the containers in this account lazily as needed. For more
     * information, see the <a href="https:
     *
     * @return A reactive response emitting the list of containers.
     */
    public PagedFlux<ContainerItem> listContainers() {
        return this.listContainers(new ListContainersOptions());
    }

    /**
     * Returns a reactive Publisher emitting all the containers in this account lazily as needed. For more
     * information, see the <a href="https:
     *
     * @param options A {@link ListContainersOptions} which specifies what data should be returned by the service.
     * @return A reactive response emitting the list of containers.
     */
    // FIX: this overload was missing from the class even though listContainers() above calls it and its
    // javadoc was present, which left the class uncompilable; restored from the sibling implementation.
    public PagedFlux<ContainerItem> listContainers(ListContainersOptions options) {
        return new PagedFlux<>(
            () -> listContainersSegment(null, options)
                .map(response -> new PagedResponseBase<>(
                    response.request(),
                    response.statusCode(),
                    response.headers(),
                    response.value().containerItems(),
                    response.value().nextMarker(),
                    response.deserializedHeaders())),
            (marker) -> listContainersSegment(marker, options)
                .map(response -> new PagedResponseBase<>(
                    response.request(),
                    response.statusCode(),
                    response.headers(),
                    response.value().containerItems(),
                    response.value().nextMarker(),
                    response.deserializedHeaders())));
    }

    /*
     * Returns a Mono segment of containers starting from the specified Marker.
     * Use an empty marker to start enumeration from the beginning. Container names are returned in lexicographic
     * order. After getting a segment, process it, and then call ListContainers again (passing the
     * previously-returned Marker) to get the next segment. For more information, see
     * the <a href="https:
     *
     * @param marker
     *      Identifies the portion of the list to be returned with the next list operation.
     *      This value is returned in the response of a previous list operation as the
     *      ListContainersSegmentResponse.body().nextMarker(). Set to null to list the first segment.
     * @param options
     *      A {@link ListContainersOptions} which specifies what data should be returned by the service.
     *
     * @return Emits the successful response.
     *
     * @apiNote
     * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list "Sample code for ServiceURL.listContainersSegment")] \n
     * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list_helper "Helper code for ServiceURL.listContainersSegment")] \n
     * For more samples, please see the [Samples file](%https:
     */
    private Mono<ServicesListContainersSegmentResponse> listContainersSegment(String marker,
        ListContainersOptions options) {
        options = options == null ? new ListContainersOptions() : options;
        return postProcessResponse(
            this.azureBlobStorage.services().listContainersSegmentWithRestResponseAsync(
                options.prefix(), marker, options.maxResults(), options.details().toIncludeType(), null, null,
                Context.NONE));
    }

    /**
     * Gets the properties of a storage account’s Blob service. For more information, see the
     * <a href="https:
     *
     * @return A reactive response containing the storage account properties.
     */
    public Mono<Response<StorageServiceProperties>> getProperties() {
        return postProcessResponse(
            this.azureBlobStorage.services().getPropertiesWithRestResponseAsync(null, null, Context.NONE))
            .map(rb -> new SimpleResponse<>(rb, rb.value()));
    }

    /**
     * Sets properties for a storage account's Blob service endpoint. For more information, see the
     * <a href="https:
     * Note that setting the default service version has no effect when using this client because this client
     * explicitly sets the version header on each request, overriding the default.
     *
     * @param properties Configures the service.
     * @return A reactive response containing the storage account properties.
     */
    public Mono<VoidResponse> setProperties(StorageServiceProperties properties) {
        return postProcessResponse(
            this.azureBlobStorage.services().setPropertiesWithRestResponseAsync(properties, null, null,
                Context.NONE))
            .map(VoidResponse::new);
    }

    /**
     * Gets a user delegation key for use with this account's blob storage. Note: This method call is only valid
     * when using {@link TokenCredential} in this object's {@link HttpPipeline}.
     *
     * @param start Start time for the key's validity. Null indicates immediate start.
     * @param expiry Expiration of the key's validity.
     * @return A reactive response containing the user delegation key.
     * @throws IllegalArgumentException If {@code start} isn't null and is after {@code expiry}.
     */
    public Mono<Response<UserDelegationKey>> getUserDelegationKey(OffsetDateTime start, OffsetDateTime expiry) {
        Utility.assertNotNull("expiry", expiry);
        if (start != null && !start.isBefore(expiry)) {
            throw new IllegalArgumentException("`start` must be null or a datetime before `expiry`.");
        }
        return postProcessResponse(
            this.azureBlobStorage.services().getUserDelegationKeyWithRestResponseAsync(
                new KeyInfo()
                    // An empty start means "effective immediately"; otherwise format as ISO-8601 UTC.
                    .start(start == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(start))
                    .expiry(Utility.ISO_8601_UTC_DATE_FORMATTER.format(expiry)),
                null, null, Context.NONE)
        ).map(rb -> new SimpleResponse<>(rb, rb.value()));
    }

    /**
     * Retrieves statistics related to replication for the Blob service. It is only available on the secondary
     * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more
     * information, see the <a href="https:
     *
     * @return A reactive response containing the storage account statistics.
     */
    public Mono<Response<StorageServiceStats>> getStatistics() {
        return postProcessResponse(
            this.azureBlobStorage.services().getStatisticsWithRestResponseAsync(null, null, Context.NONE))
            .map(rb -> new SimpleResponse<>(rb, rb.value()));
    }

    /**
     * Returns the sku name and account kind for the account. For more information, please see the
     * <a href="https:
     *
     * @return A reactive response containing the storage account info.
     */
    public Mono<Response<StorageAccountInfo>> getAccountInfo() {
        return postProcessResponse(this.azureBlobStorage.services().getAccountInfoWithRestResponseAsync(Context.NONE))
            .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders())));
    }

    /**
     * Generates an account SAS token with the specified parameters
     *
     * @param accountSASService The {@code AccountSASService} services for the account SAS
     * @param accountSASResourceType An optional {@code AccountSASResourceType} resources for the account SAS
     * @param accountSASPermission The {@code AccountSASPermission} permission for the account SAS
     * @param expiryTime The {@code OffsetDateTime} expiry time for the account SAS
     * @return A string that represents the SAS token
     */
    public String generateAccountSAS(AccountSASService accountSASService,
        AccountSASResourceType accountSASResourceType, AccountSASPermission accountSASPermission,
        OffsetDateTime expiryTime) {
        return this.generateAccountSAS(accountSASService, accountSASResourceType, accountSASPermission,
            expiryTime, null /* startTime */, null /* version */, null /* ipRange */, null /* sasProtocol */);
    }

    /**
     * Generates an account SAS token with the specified parameters
     *
     * @param accountSASService The {@code AccountSASService} services for the account SAS
     * @param accountSASResourceType An optional {@code AccountSASResourceType} resources for the account SAS
     * @param accountSASPermission The {@code AccountSASPermission} permission for the account SAS
     * @param expiryTime The {@code OffsetDateTime} expiry time for the account SAS
     * @param startTime The {@code OffsetDateTime} start time for the account SAS
     * @param version The {@code String} version for the account SAS
     * @param ipRange An optional {@code IPRange} ip address range for the SAS
     * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS
     * @return A string that represents the SAS token
     */
    public String generateAccountSAS(AccountSASService accountSASService,
        AccountSASResourceType accountSASResourceType, AccountSASPermission accountSASPermission,
        OffsetDateTime expiryTime, OffsetDateTime startTime, String version, IPRange ipRange,
        SASProtocol sasProtocol) {
        AccountSASSignatureValues accountSASSignatureValues = new AccountSASSignatureValues();
        accountSASSignatureValues.services(accountSASService == null ? null : accountSASService.toString());
        accountSASSignatureValues.resourceTypes(accountSASResourceType == null
            ? null : accountSASResourceType.toString());
        accountSASSignatureValues.permissions(accountSASPermission == null
            ? null : accountSASPermission.toString());
        accountSASSignatureValues.expiryTime(expiryTime);
        accountSASSignatureValues.startTime(startTime);
        // A null version falls back to the signature values' default service version.
        if (version != null) {
            accountSASSignatureValues.version(version);
        }
        accountSASSignatureValues.ipRange(ipRange);
        accountSASSignatureValues.protocol(sasProtocol);
        // Account SAS tokens must be signed with the account's shared key credential from the pipeline.
        SharedKeyCredential sharedKeyCredential =
            Utility.getSharedKeyCredential(this.azureBlobStorage.getHttpPipeline());
        SASQueryParameters sasQueryParameters =
            accountSASSignatureValues.generateSASQueryParameters(sharedKeyCredential);
        return sasQueryParameters.encode();
    }
}
class BlobServiceAsyncClient {
    private final ClientLogger logger = new ClientLogger(BlobServiceAsyncClient.class);

    private final AzureBlobStorageImpl azureBlobStorage;

    /**
     * Package-private constructor for use by {@link BlobServiceClientBuilder}.
     *
     * @param azureBlobStorage the API client for blob storage
     */
    BlobServiceAsyncClient(AzureBlobStorageImpl azureBlobStorage) {
        this.azureBlobStorage = azureBlobStorage;
    }

    /**
     * Initializes a {@link ContainerAsyncClient} object pointing to the specified container. This method does not
     * create a container. It simply constructs the URL to the container and offers access to methods relevant to
     * containers.
     *
     * @param containerName The name of the container to point to.
     * @return A {@link ContainerAsyncClient} object pointing to the specified container
     */
    public ContainerAsyncClient getContainerAsyncClient(String containerName) {
        return new ContainerAsyncClient(new AzureBlobStorageBuilder()
            .url(Utility.appendToURLPath(getAccountUrl(), containerName).toString())
            .pipeline(azureBlobStorage.getHttpPipeline())
            .build());
    }

    /**
     * Creates a new container within a storage account. If a container with the same name already exists, the
     * operation fails. For more information, see the Azure Docs.
     *
     * @param containerName Name of the container to create
     * @return A {@link Mono} containing a {@link ContainerAsyncClient} used to interact with the container created.
     */
    public Mono<ContainerAsyncClient> createContainer(String containerName) {
        return createContainerWithResponse(containerName, null, null).flatMap(FluxUtil::toMono);
    }

    /**
     * Creates a new container within a storage account. If a container with the same name already exists, the
     * operation fails. For more information, see the Azure Docs.
     *
     * @param containerName Name of the container to create
     * @param metadata {@link Metadata}
     * @param accessType Specifies how the data in this container is available to the public. See the
     * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
     * @return A {@link Mono} containing a {@link Response} whose value is a {@link ContainerAsyncClient} used to
     * interact with the container created.
     */
    public Mono<Response<ContainerAsyncClient>> createContainerWithResponse(String containerName, Metadata metadata,
            PublicAccessType accessType) {
        return withContext(context -> createContainerWithResponse(containerName, metadata, accessType, context));
    }

    Mono<Response<ContainerAsyncClient>> createContainerWithResponse(String containerName, Metadata metadata,
            PublicAccessType accessType, Context context) {
        ContainerAsyncClient containerAsyncClient = getContainerAsyncClient(containerName);

        return containerAsyncClient.createWithResponse(metadata, accessType, context)
            .map(response -> new SimpleResponse<>(response, containerAsyncClient));
    }

    /**
     * Deletes the specified container in the storage account. If the container doesn't exist the operation fails.
     * For more information see the Azure Docs.
     *
     * @param containerName Name of the container to delete
     * @return A {@link Mono} signalling completion
     */
    public Mono<Void> deleteContainer(String containerName) {
        return deleteContainerWithResponse(containerName).flatMap(FluxUtil::toMono);
    }

    /**
     * Deletes the specified container in the storage account. If the container doesn't exist the operation fails.
     * For more information see the Azure Docs.
     *
     * @param containerName Name of the container to delete
     * @return A {@link Mono} containing status code and HTTP headers
     */
    public Mono<VoidResponse> deleteContainerWithResponse(String containerName) {
        return withContext(context -> deleteContainerWithResponse(containerName, context));
    }

    Mono<VoidResponse> deleteContainerWithResponse(String containerName, Context context) {
        return getContainerAsyncClient(containerName).deleteWithResponse(null, context);
    }

    /**
     * Gets the URL of the storage account represented by this client.
     *
     * @return the URL.
     * @throws RuntimeException If the account URL is malformed.
     */
    public URL getAccountUrl() {
        try {
            return new URL(azureBlobStorage.getUrl());
        } catch (MalformedURLException e) {
            // FIX: the original concatenated the class name onto the format string
            // ("Invalid URL on %s: %s" + getClass().getSimpleName()) and supplied only one argument for two %s
            // specifiers, so String.format itself would throw MissingFormatArgumentException. Supply both values
            // as proper format arguments instead.
            throw logger.logExceptionAsError(new RuntimeException(
                String.format("Invalid URL on %s: %s", getClass().getSimpleName(), azureBlobStorage.getUrl()), e));
        }
    }

    /**
     * Returns a reactive Publisher emitting all the containers in this account lazily as needed. For more
     * information, see the Azure Docs.
     *
     * @return A reactive response emitting the list of containers.
     */
    public PagedFlux<ContainerItem> listContainers() {
        return this.listContainers(new ListContainersOptions());
    }

    /**
     * Returns a reactive Publisher emitting all the containers in this account lazily as needed. For more
     * information, see the Azure Docs.
     *
     * @param options A {@link ListContainersOptions} which specifies what data should be returned by the service.
     * @return A reactive response emitting the list of containers.
     */
    public PagedFlux<ContainerItem> listContainers(ListContainersOptions options) {
        // NOTE(review): this overload is called by listContainers() but its body was missing between its javadoc
        // and the timeout-aware implementation in the reviewed source; restored here as a plain delegation with no
        // timeout, mirroring the listBlobsFlat pattern — confirm against the original file.
        return listContainersWithOptionalTimeout(options, null);
    }

    /*
     * Implementation for this paged listing operation, supporting an optional timeout provided by the synchronous
     * BlobServiceClient. Applies the given timeout to each Mono<ServiceListContainersSegmentResponse> backing the
     * PagedFlux.
     *
     * @param options A {@link ListContainersOptions} which specifies what data should be returned by the service.
     * @param timeout An optional timeout to be applied to the network asynchronous operations.
     * @return A reactive response emitting the list of containers.
     */
    PagedFlux<ContainerItem> listContainersWithOptionalTimeout(ListContainersOptions options, Duration timeout) {
        // One shared marker -> page function, reused for both the first page (null marker) and every
        // continuation page, so the PagedResponseBase mapping is written only once.
        Function<String, Mono<PagedResponse<ContainerItem>>> func =
            marker -> listContainersSegment(marker, options, timeout)
                .map(response -> new PagedResponseBase<>(
                    response.request(),
                    response.statusCode(),
                    response.headers(),
                    response.value().containerItems(),
                    response.value().nextMarker(),
                    response.deserializedHeaders()));

        return new PagedFlux<>(() -> func.apply(null), marker -> func.apply(marker));
    }

    /*
     * Returns a Mono segment of containers starting from the specified Marker. Use an empty marker to start
     * enumeration from the beginning. Container names are returned in lexicographic order. After getting a segment,
     * process it, and then call listContainers again (passing the previously-returned Marker) to get the next
     * segment. For more information, see the Azure Docs.
     *
     * @param marker Identifies the portion of the list to be returned with the next list operation. This value is
     * returned in the response of a previous list operation as ListContainersSegmentResponse.body().nextMarker().
     * Set to null to list the first segment.
     * @param options A {@link ListContainersOptions} which specifies what data should be returned by the service.
     * @return Emits the successful response.
     */
    private Mono<ServicesListContainersSegmentResponse> listContainersSegment(String marker,
            ListContainersOptions options, Duration timeout) {
        options = options == null ? new ListContainersOptions() : options;

        return postProcessResponse(Utility.applyOptionalTimeout(
            this.azureBlobStorage.services().listContainersSegmentWithRestResponseAsync(
                options.prefix(), marker, options.maxResults(), options.details().toIncludeType(), null, null,
                Context.NONE),
            timeout));
    }

    /**
     * Gets the properties of a storage account's Blob service. For more information, see the Azure Docs.
     *
     * @return A reactive response containing the storage account properties.
     */
    public Mono<StorageServiceProperties> getProperties() {
        return getPropertiesWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Gets the properties of a storage account's Blob service. For more information, see the Azure Docs.
     *
     * @return A {@link Mono} containing a {@link Response} whose value is the storage account properties.
     */
    public Mono<Response<StorageServiceProperties>> getPropertiesWithResponse() {
        return withContext(context -> getPropertiesWithResponse(context));
    }

    Mono<Response<StorageServiceProperties>> getPropertiesWithResponse(Context context) {
        return postProcessResponse(
            this.azureBlobStorage.services().getPropertiesWithRestResponseAsync(null, null, context))
            .map(rb -> new SimpleResponse<>(rb, rb.value()));
    }

    /**
     * Sets properties for a storage account's Blob service endpoint. For more information, see the Azure Docs.
     * Note that setting the default service version has no effect when using this client because this client
     * explicitly sets the version header on each request, overriding the default.
     *
     * @param properties Configures the service.
     * @return A {@link Mono} signalling completion.
     */
    public Mono<Void> setProperties(StorageServiceProperties properties) {
        return setPropertiesWithReponse(properties).flatMap(FluxUtil::toMono);
    }

    /**
     * Sets properties for a storage account's Blob service endpoint. For more information, see the Azure Docs.
     * Note that setting the default service version has no effect when using this client because this client
     * explicitly sets the version header on each request, overriding the default.
     *
     * <p>NOTE(review): "Reponse" is a typo for "Response". The name is kept as-is because renaming a public method
     * breaks existing callers; consider adding a correctly-spelled overload and deprecating this one.</p>
     *
     * @param properties Configures the service.
     * @return A {@link Mono} containing status code and HTTP headers.
     */
    public Mono<VoidResponse> setPropertiesWithReponse(StorageServiceProperties properties) {
        return withContext(context -> setPropertiesWithReponse(properties, context));
    }

    Mono<VoidResponse> setPropertiesWithReponse(StorageServiceProperties properties, Context context) {
        return postProcessResponse(
            this.azureBlobStorage.services().setPropertiesWithRestResponseAsync(properties, null, null, context))
            .map(VoidResponse::new);
    }

    /**
     * Gets a user delegation key for use with this account's blob storage. Note: This method call is only valid
     * when using {@link TokenCredential} in this object's {@link HttpPipeline}.
     *
     * @param start Start time for the key's validity. Null indicates immediate start.
     * @param expiry Expiration of the key's validity.
     * @return A {@link Mono} containing the user delegation key.
     * @throws IllegalArgumentException If {@code start} isn't null and is after {@code expiry}.
     */
    public Mono<UserDelegationKey> getUserDelegationKey(OffsetDateTime start, OffsetDateTime expiry) {
        return withContext(context -> getUserDelegationKeyWithResponse(start, expiry, context))
            .flatMap(FluxUtil::toMono);
    }

    /**
     * Gets a user delegation key for use with this account's blob storage. Note: This method call is only valid
     * when using {@link TokenCredential} in this object's {@link HttpPipeline}.
     *
     * @param start Start time for the key's validity. Null indicates immediate start.
     * @param expiry Expiration of the key's validity.
     * @return A {@link Mono} containing a {@link Response} whose value is the user delegation key.
     * @throws IllegalArgumentException If {@code start} isn't null and is after {@code expiry}.
     */
    public Mono<Response<UserDelegationKey>> getUserDelegationKeyWithResponse(OffsetDateTime start,
            OffsetDateTime expiry) {
        return withContext(context -> getUserDelegationKeyWithResponse(start, expiry, context));
    }

    Mono<Response<UserDelegationKey>> getUserDelegationKeyWithResponse(OffsetDateTime start, OffsetDateTime expiry,
            Context context) {
        Utility.assertNotNull("expiry", expiry);
        if (start != null && !start.isBefore(expiry)) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("`start` must be null or a datetime before `expiry`."));
        }

        return postProcessResponse(
            this.azureBlobStorage.services().getUserDelegationKeyWithRestResponseAsync(
                new KeyInfo()
                    // The service expects ISO-8601 UTC strings; a null start is sent as the empty string.
                    .start(start == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(start))
                    .expiry(Utility.ISO_8601_UTC_DATE_FORMATTER.format(expiry)),
                null, null, context)
        ).map(rb -> new SimpleResponse<>(rb, rb.value()));
    }

    /**
     * Retrieves statistics related to replication for the Blob service. It is only available on the secondary
     * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more
     * information, see the Azure Docs.
     *
     * @return A {@link Mono} containing the storage account statistics.
     */
    public Mono<StorageServiceStats> getStatistics() {
        return getStatisticsWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Retrieves statistics related to replication for the Blob service. It is only available on the secondary
     * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more
     * information, see the Azure Docs.
     *
     * @return A {@link Mono} containing a {@link Response} whose value is the storage account statistics.
     */
    public Mono<Response<StorageServiceStats>> getStatisticsWithResponse() {
        return withContext(context -> getStatisticsWithResponse(context));
    }

    Mono<Response<StorageServiceStats>> getStatisticsWithResponse(Context context) {
        return postProcessResponse(
            this.azureBlobStorage.services().getStatisticsWithRestResponseAsync(null, null, context))
            .map(rb -> new SimpleResponse<>(rb, rb.value()));
    }

    /**
     * Returns the sku name and account kind for the account. For more information, please see the Azure Docs.
     *
     * @return A {@link Mono} containing the storage account info.
     */
    public Mono<StorageAccountInfo> getAccountInfo() {
        return getAccountInfoWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Returns the sku name and account kind for the account. For more information, please see the Azure Docs.
     *
     * @return A {@link Mono} containing a {@link Response} whose value is the storage account info.
     */
    public Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse() {
        return withContext(context -> getAccountInfoWithResponse(context));
    }

    Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse(Context context) {
        return postProcessResponse(this.azureBlobStorage.services().getAccountInfoWithRestResponseAsync(context))
            .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders())));
    }

    /**
     * Generates an account SAS token with the specified parameters.
     *
     * @param accountSASService The {@code AccountSASService} services for the account SAS
     * @param accountSASResourceType An optional {@code AccountSASResourceType} resources for the account SAS
     * @param accountSASPermission The {@code AccountSASPermission} permission for the account SAS
     * @param expiryTime The {@code OffsetDateTime} expiry time for the account SAS
     * @return A string that represents the SAS token
     */
    public String generateAccountSAS(AccountSASService accountSASService,
            AccountSASResourceType accountSASResourceType, AccountSASPermission accountSASPermission,
            OffsetDateTime expiryTime) {
        return this.generateAccountSAS(accountSASService, accountSASResourceType, accountSASPermission, expiryTime,
            null /* startTime */, null /* version */, null /* ipRange */, null /* sasProtocol */);
    }

    /**
     * Generates an account SAS token with the specified parameters.
     *
     * @param accountSASService The {@code AccountSASService} services for the account SAS
     * @param accountSASResourceType An optional {@code AccountSASResourceType} resources for the account SAS
     * @param accountSASPermission The {@code AccountSASPermission} permission for the account SAS
     * @param expiryTime The {@code OffsetDateTime} expiry time for the account SAS
     * @param startTime The {@code OffsetDateTime} start time for the account SAS
     * @param version The {@code String} version for the account SAS
     * @param ipRange An optional {@code IPRange} ip address range for the SAS
     * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS
     * @return A string that represents the SAS token
     */
    public String generateAccountSAS(AccountSASService accountSASService,
            AccountSASResourceType accountSASResourceType, AccountSASPermission accountSASPermission,
            OffsetDateTime expiryTime, OffsetDateTime startTime, String version, IPRange ipRange,
            SASProtocol sasProtocol) {
        AccountSASSignatureValues accountSASSignatureValues = new AccountSASSignatureValues();

        // The signature-values setters expect string forms; translate null pieces to null strings.
        accountSASSignatureValues.services(accountSASService == null ? null : accountSASService.toString());
        accountSASSignatureValues.resourceTypes(
            accountSASResourceType == null ? null : accountSASResourceType.toString());
        accountSASSignatureValues.permissions(
            accountSASPermission == null ? null : accountSASPermission.toString());
        accountSASSignatureValues.expiryTime(expiryTime);
        accountSASSignatureValues.startTime(startTime);

        // A null version is left unset so the signature values' own default applies.
        if (version != null) {
            accountSASSignatureValues.version(version);
        }

        accountSASSignatureValues.ipRange(ipRange);
        accountSASSignatureValues.protocol(sasProtocol);

        // Signing an account SAS requires the shared key credential configured on this client's pipeline.
        SharedKeyCredential sharedKeyCredential =
            Utility.getSharedKeyCredential(this.azureBlobStorage.getHttpPipeline());

        SASQueryParameters sasQueryParameters =
            accountSASSignatureValues.generateSASQueryParameters(sharedKeyCredential);

        return sasQueryParameters.encode();
    }
}
Each of these functions duplicates its own `Function<String, Mono<PagedResponse<T>>>` mapping into `PagedResponseBase` instead of sharing a single one between the first-page and continuation-page suppliers.
/**
 * Returns a reactive Publisher emitting all the blobs in this container lazily as needed (flat listing).
 *
 * @param options A {@link ListBlobsOptions} which specifies what data should be returned by the service.
 * @return A reactive response emitting the list of blobs.
 */
public PagedFlux<BlobItem> listBlobsFlat(ListBlobsOptions options) {
    // FIX(duplication): the original repeated the identical PagedResponseBase mapping lambda for both the
    // first-page supplier and the continuation-page function. Define it once and reuse it for both, matching
    // the pattern used by listContainersWithOptionalTimeout.
    Function<String, Mono<PagedResponse<BlobItem>>> func =
        marker -> listBlobsFlatSegment(marker, options)
            .map(response -> new PagedResponseBase<>(
                response.request(),
                response.statusCode(),
                response.headers(),
                response.value().segment().blobItems(),
                response.value().nextMarker(),
                response.deserializedHeaders()));

    return new PagedFlux<>(() -> func.apply(null), marker -> func.apply(marker));
}
.map(response -> new PagedResponseBase<>(
/**
 * Returns a reactive Publisher emitting all the blobs in this container lazily as needed (flat listing).
 *
 * @param options A {@link ListBlobsOptions} which specifies what data should be returned by the service.
 * @return A reactive response emitting the list of blobs.
 */
public PagedFlux<BlobItem> listBlobsFlat(ListBlobsOptions options) {
    // Delegates to the timeout-aware implementation with no timeout applied.
    return listBlobsFlatWithOptionalTimeout(options, null);
}
class ContainerAsyncClient {
    public static final String ROOT_CONTAINER_NAME = "$root";
    public static final String STATIC_WEBSITE_CONTAINER_NAME = "$web";
    public static final String LOG_CONTAINER_NAME = "$logs";

    private final AzureBlobStorageImpl azureBlobStorage;

    /**
     * Package-private constructor for use by {@link ContainerClientBuilder}.
     *
     * @param azureBlobStorageBuilder the API client builder for blob storage API
     */
    ContainerAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder) {
        this.azureBlobStorage = azureBlobStorageBuilder.build();
    }

    /**
     * Creates a new {@link BlockBlobAsyncClient} object by concatenating the blobName to the end of this
     * ContainerAsyncClient's URL. The new client uses the same request policy pipeline as this client.
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @return A new {@link BlockBlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public BlockBlobAsyncClient getBlockBlobAsyncClient(String blobName) {
        return getBlockBlobAsyncClient(blobName, null);
    }

    /**
     * Creates a new {@link BlockBlobAsyncClient} object by concatenating the blobName to the end of this
     * ContainerAsyncClient's URL. The new client uses the same request policy pipeline as this client.
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @param snapshot the snapshot identifier for the blob.
     * @return A new {@link BlockBlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public BlockBlobAsyncClient getBlockBlobAsyncClient(String blobName, String snapshot) {
        return new BlockBlobAsyncClient(new AzureBlobStorageBuilder()
            .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString())
            .pipeline(azureBlobStorage.getHttpPipeline()), snapshot);
    }

    /**
     * Creates a new {@link PageBlobAsyncClient} object by concatenating blobName to the end of this
     * ContainerAsyncClient's URL. The new client uses the same request policy pipeline as this client.
     * (Fixed "Creates creates" javadoc typo.)
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @return A new {@link PageBlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public PageBlobAsyncClient getPageBlobAsyncClient(String blobName) {
        return getPageBlobAsyncClient(blobName, null);
    }

    /**
     * Creates a new {@link PageBlobAsyncClient} object by concatenating blobName to the end of this
     * ContainerAsyncClient's URL. The new client uses the same request policy pipeline as this client.
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @param snapshot the snapshot identifier for the blob.
     * @return A new {@link PageBlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public PageBlobAsyncClient getPageBlobAsyncClient(String blobName, String snapshot) {
        return new PageBlobAsyncClient(new AzureBlobStorageBuilder()
            .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString())
            .pipeline(azureBlobStorage.getHttpPipeline()), snapshot);
    }

    /**
     * Creates a new {@link AppendBlobAsyncClient} object by concatenating blobName to the end of this
     * ContainerAsyncClient's URL. The new client uses the same request policy pipeline as this client.
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @return A new {@link AppendBlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public AppendBlobAsyncClient getAppendBlobAsyncClient(String blobName) {
        return getAppendBlobAsyncClient(blobName, null);
    }

    /**
     * Creates a new {@link AppendBlobAsyncClient} object by concatenating blobName to the end of this
     * ContainerAsyncClient's URL. The new client uses the same request policy pipeline as this client.
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @param snapshot the snapshot identifier for the blob.
     * @return A new {@link AppendBlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public AppendBlobAsyncClient getAppendBlobAsyncClient(String blobName, String snapshot) {
        return new AppendBlobAsyncClient(new AzureBlobStorageBuilder()
            .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString())
            .pipeline(azureBlobStorage.getHttpPipeline()), snapshot);
    }

    /**
     * Creates a new {@link BlobAsyncClient} object by concatenating blobName to the end of this
     * ContainerAsyncClient's URL. The new client uses the same request policy pipeline as this client.
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public BlobAsyncClient getBlobAsyncClient(String blobName) {
        return getBlobAsyncClient(blobName, null);
    }

    /**
     * Creates a new {@link BlobAsyncClient} object by concatenating blobName to the end of this
     * ContainerAsyncClient's URL. The new client uses the same request policy pipeline as this client.
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @param snapshot the snapshot identifier for the blob.
     * @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public BlobAsyncClient getBlobAsyncClient(String blobName, String snapshot) {
        return new BlobAsyncClient(new AzureBlobStorageBuilder()
            .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString())
            .pipeline(azureBlobStorage.getHttpPipeline()), snapshot);
    }

    /**
     * Initializes a {@link BlobServiceAsyncClient} object pointing to the storage account this container is in.
     *
     * @return A {@link BlobServiceAsyncClient} object pointing to the specified storage account
     */
    public BlobServiceAsyncClient getBlobServiceAsyncClient() {
        return new BlobServiceAsyncClient(new AzureBlobStorageBuilder()
            .url(Utility.stripLastPathSegment(getContainerUrl()).toString())
            .pipeline(azureBlobStorage.getHttpPipeline()));
    }

    /**
     * Gets the URL of the container represented by this client.
     *
     * @return the URL.
     * @throws RuntimeException If the container has a malformed URL.
     */
    public URL getContainerUrl() {
        try {
            return new URL(azureBlobStorage.getUrl());
        } catch (MalformedURLException e) {
            // FIX: the original concatenated the class name onto the format string
            // ("Invalid URL on %s: %s" + getClass().getSimpleName()) and supplied only one argument for two %s
            // specifiers, so String.format itself would throw MissingFormatArgumentException. Supply both values
            // as proper format arguments instead.
            throw new RuntimeException(
                String.format("Invalid URL on %s: %s", getClass().getSimpleName(), azureBlobStorage.getUrl()), e);
        }
    }

    /**
     * Gets if the container this client represents exists in the cloud.
     *
     * @return true if the container exists, false if it doesn't
     */
    public Mono<Response<Boolean>> exists() {
        return this.getProperties(null)
            .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true))
            // A 404 from the service means "does not exist"; any other error propagates.
            .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> {
                HttpResponse response = ((StorageException) t).response();
                return Mono.just(
                    new SimpleResponse<>(response.request(), response.statusCode(), response.headers(), false));
            });
    }

    /**
     * Creates a new container within a storage account. If a container with the same name already exists, the
     * operation fails. For more information, see the Azure Docs.
     *
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> create() {
        return this.create(null, null);
    }

    /**
     * Creates a new container within a storage account. If a container with the same name already exists, the
     * operation fails. For more information, see the Azure Docs.
     *
     * @param metadata {@link Metadata}
     * @param accessType Specifies how the data in this container is available to the public. See the
     * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> create(Metadata metadata, PublicAccessType accessType) {
        metadata = metadata == null ? new Metadata() : metadata;

        return postProcessResponse(this.azureBlobStorage.containers().createWithRestResponseAsync(
            null, null, metadata, accessType, null, Context.NONE))
            .map(VoidResponse::new);
    }

    /**
     * Marks the specified container for deletion. The container and any blobs contained within it are later
     * deleted during garbage collection. For more information, see the Azure Docs.
     *
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> delete() {
        return this.delete(null);
    }

    /**
     * Marks the specified container for deletion. The container and any blobs contained within it are later
     * deleted during garbage collection. For more information, see the Azure Docs.
     *
     * @param accessConditions {@link ContainerAccessConditions}
     * @return A reactive response signalling completion.
     * @throws UnsupportedOperationException If the access conditions contain ETag conditions.
     */
    public Mono<VoidResponse> delete(ContainerAccessConditions accessConditions) {
        accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions;

        if (!validateNoEtag(accessConditions.modifiedAccessConditions())) {
            // The service does not support ETag access conditions for container deletion.
            throw new UnsupportedOperationException("ETag access conditions are not supported for this API.");
        }

        return postProcessResponse(this.azureBlobStorage.containers()
            .deleteWithRestResponseAsync(null, null, null, accessConditions.leaseAccessConditions(),
                accessConditions.modifiedAccessConditions(), Context.NONE))
            .map(VoidResponse::new);
    }

    /**
     * Returns the container's metadata and system properties. For more information, see the Azure Docs.
     *
     * @return A reactive response containing the container properties.
     */
    public Mono<Response<ContainerProperties>> getProperties() {
        return this.getProperties(null);
    }

    /**
     * Returns the container's metadata and system properties. For more information, see the Azure Docs.
     *
     * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease
     * does not match the active lease on the blob.
     * @return A reactive response containing the container properties.
     */
    public Mono<Response<ContainerProperties>> getProperties(LeaseAccessConditions leaseAccessConditions) {
        return postProcessResponse(this.azureBlobStorage.containers()
            .getPropertiesWithRestResponseAsync(null, null, null, leaseAccessConditions, Context.NONE))
            .map(rb -> new SimpleResponse<>(rb, new ContainerProperties(rb.deserializedHeaders())));
    }

    /**
     * Sets the container's metadata. For more information, see the Azure Docs.
     *
     * @param metadata {@link Metadata}
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> setMetadata(Metadata metadata) {
        return this.setMetadata(metadata, null);
    }

    /**
     * Sets the container's metadata. For more information, see the Azure Docs.
     *
     * @param metadata {@link Metadata}
     * @param accessConditions {@link ContainerAccessConditions}
     * @return A reactive response signalling completion.
     * @throws UnsupportedOperationException If the access conditions contain anything other than an
     * If-Modified-Since condition.
     */
    public Mono<VoidResponse> setMetadata(Metadata metadata, ContainerAccessConditions accessConditions) {
        metadata = metadata == null ? new Metadata() : metadata;
        accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions;

        // NOTE(review): if validateNoEtag(...) returns true while modifiedAccessConditions() is null, the second
        // operand dereferences null — presumably the default ContainerAccessConditions always supplies a non-null
        // ModifiedAccessConditions; confirm against that type's constructor.
        if (!validateNoEtag(accessConditions.modifiedAccessConditions())
                || accessConditions.modifiedAccessConditions().ifUnmodifiedSince() != null) {
            throw new UnsupportedOperationException(
                "If-Modified-Since is the only HTTP access condition supported for this API");
        }

        return postProcessResponse(this.azureBlobStorage.containers()
            .setMetadataWithRestResponseAsync(null, null, metadata, null, accessConditions.leaseAccessConditions(),
                accessConditions.modifiedAccessConditions(), Context.NONE))
            .map(VoidResponse::new);
    }

    /**
     * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed
     * publicly. For more information, see the Azure Docs.
     *
     * @return A reactive response containing the container access policy.
     */
    public Mono<Response<ContainerAccessPolicies>> getAccessPolicy() {
        return this.getAccessPolicy(null);
    }

    /**
     * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed
     * publicly. For more information, see the Azure Docs.
     *
     * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease
     * does not match the active lease on the blob.
     * @return A reactive response containing the container access policy.
     */
    public Mono<Response<ContainerAccessPolicies>> getAccessPolicy(LeaseAccessConditions leaseAccessConditions) {
        return postProcessResponse(this.azureBlobStorage.containers().getAccessPolicyWithRestResponseAsync(null,
            null, null, leaseAccessConditions, Context.NONE)
            .map(response -> new SimpleResponse<>(response,
                new ContainerAccessPolicies(response.deserializedHeaders().blobPublicAccess(),
                    response.value()))));
    }

    /**
     * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed
     * publicly. Note that, for each signed identifier, we will truncate the start and expiry times to the nearest
     * second to ensure the time formatting is compatible with the service. For more information, see the Azure
     * Docs.
     *
     * @param accessType Specifies how the data in this container is available to the public. See the
     * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
     * @param identifiers A list of {@link SignedIdentifier} objects that specify the permissions for the
     * container. Passing null will clear all access policies.
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> setAccessPolicy(PublicAccessType accessType, List<SignedIdentifier> identifiers) {
        return this.setAccessPolicy(accessType, identifiers, null);
    }

    /**
     * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed
     * publicly. Note that, for each signed identifier, we will truncate the start and expiry times to the nearest
     * second to ensure the time formatting is compatible with the service. For more information, see the Azure
     * Docs.
     *
     * @param accessType Specifies how the data in this container is available to the public. See the
     * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
* @param identifiers A list of {@link SignedIdentifier} objects that specify the permissions for the container. * Please see * <a href="https: * for more information. Passing null will clear all access policies. * @param accessConditions {@link ContainerAccessConditions} * @return A reactive response signalling completion. * @throws UnsupportedOperationException If {@link ContainerAccessConditions * {@link ModifiedAccessConditions */ public Mono<VoidResponse> setAccessPolicy(PublicAccessType accessType, List<SignedIdentifier> identifiers, ContainerAccessConditions accessConditions) { accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions; if (!validateNoEtag(accessConditions.modifiedAccessConditions())) { throw new UnsupportedOperationException("ETag access conditions are not supported for this API."); } /* We truncate to seconds because the service only supports nanoseconds or seconds, but doing an OffsetDateTime.now will only give back milliseconds (more precise fields are zeroed and not serialized). This allows for proper serialization with no real detriment to users as sub-second precision on active time for signed identifiers is not really necessary. 
*/ if (identifiers != null) { for (SignedIdentifier identifier : identifiers) { if (identifier.accessPolicy() != null && identifier.accessPolicy().start() != null) { identifier.accessPolicy().start( identifier.accessPolicy().start().truncatedTo(ChronoUnit.SECONDS)); } if (identifier.accessPolicy() != null && identifier.accessPolicy().expiry() != null) { identifier.accessPolicy().expiry( identifier.accessPolicy().expiry().truncatedTo(ChronoUnit.SECONDS)); } } } return postProcessResponse(this.azureBlobStorage.containers() .setAccessPolicyWithRestResponseAsync(null, identifiers, null, accessType, null, accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), Context.NONE)) .map(VoidResponse::new); } /** * Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories are * flattened and only actual blobs and no directories are returned. * * <p> * Blob names are returned in lexicographic order. For more information, see the * <a href="https: * * <p> * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the * root level 'bar', will return * * <ul> * <li>foo/foo1 * <li>foo/foo2 * <li>bar * </ul> * * @return A reactive response emitting the flattened blobs. */ public PagedFlux<BlobItem> listBlobsFlat() { return this.listBlobsFlat(new ListBlobsOptions()); } /** * Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories are * flattened and only actual blobs and no directories are returned. * * <p> * Blob names are returned in lexicographic order. For more information, see the * <a href="https: * * <p> * E.g. 
listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the * root level 'bar', will return * * <ul> * <li>foo/foo1 * <li>foo/foo2 * <li>bar * </ul> * * @param options {@link ListBlobsOptions} * @return A reactive response emitting the listed blobs, flattened. */ /* * Returns a single segment of blobs starting from the specified Marker. Use an empty * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned * Marker) to get the next segment. For more information, see the * <a href="https: * * @param marker * Identifies the portion of the list to be returned with the next list operation. * This value is returned in the response of a previous list operation as the * ListBlobsFlatSegmentResponse.body().nextMarker(). Set to null to list the first segment. * @param options * {@link ListBlobsOptions} * * @return Emits the successful response. * * @apiNote * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat "Sample code for ContainerAsyncClient.listBlobsFlatSegment")] \n * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat_helper "helper code for ContainerAsyncClient.listBlobsFlatSegment")] \n * For more samples, please see the [Samples file](%https: */ private Mono<ContainersListBlobFlatSegmentResponse> listBlobsFlatSegment(String marker, ListBlobsOptions options) { options = options == null ? 
new ListBlobsOptions() : options; return postProcessResponse(this.azureBlobStorage.containers() .listBlobFlatSegmentWithRestResponseAsync(null, options.prefix(), marker, options.maxResults(), options.details().toList(), null, null, Context.NONE)); } /** * Returns a reactive Publisher emitting all the blobs and directories (prefixes) under the given directory * (prefix). Directories will have {@link BlobItem * * <p> * Blob names are returned in lexicographic order. For more information, see the * <a href="https: * * <p> * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the * root level 'bar', will return the following results when prefix=null: * * <ul> * <li>foo/ (isPrefix = true) * <li>bar (isPrefix = false) * </ul> * <p> * will return the following results when prefix="foo/": * * <ul> * <li>foo/foo1 (isPrefix = false) * <li>foo/foo2 (isPrefix = false) * </ul> * * @param directory The directory to list blobs underneath * @return A reactive response emitting the prefixes and blobs. */ public PagedFlux<BlobItem> listBlobsHierarchy(String directory) { return this.listBlobsHierarchy("/", new ListBlobsOptions().prefix(directory)); } /** * Returns a reactive Publisher emitting all the blobs and prefixes (directories) under the given prefix * (directory). Directories will have {@link BlobItem * * <p> * Blob names are returned in lexicographic order. For more information, see the * <a href="https: * * <p> * E.g. 
listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the * root level 'bar', will return the following results when prefix=null: * * <ul> * <li>foo/ (isPrefix = true) * <li>bar (isPrefix = false) * </ul> * <p> * will return the following results when prefix="foo/": * * <ul> * <li>foo/foo1 (isPrefix = false) * <li>foo/foo2 (isPrefix = false) * </ul> * * @param delimiter The delimiter for blob hierarchy, "/" for hierarchy based on directories * @param options {@link ListBlobsOptions} * @return A reactive response emitting the prefixes and blobs. */ public PagedFlux<BlobItem> listBlobsHierarchy(String delimiter, ListBlobsOptions options) { return new PagedFlux<>( () -> listBlobsHierarchySegment(null, delimiter, options) .map(response -> new PagedResponseBase<>( response.request(), response.statusCode(), response.headers(), response.value().segment().blobItems(), response.value().nextMarker(), response.deserializedHeaders())), (marker) -> listBlobsHierarchySegment(marker, delimiter, options) .map(response -> new PagedResponseBase<>( response.request(), response.statusCode(), response.headers(), response.value().segment().blobItems(), response.value().nextMarker(), response.deserializedHeaders()))); } /* * Returns a single segment of blobs and blob prefixes starting from the specified Marker. Use an empty * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned * Marker) to get the next segment. For more information, see the * <a href="https: * * @param marker * Identifies the portion of the list to be returned with the next list operation. * This value is returned in the response of a previous list operation as the * ListBlobsHierarchySegmentResponse.body().nextMarker(). Set to null to list the first segment. 
* @param delimiter * The operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs * whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may * be a single character or a string. * @param options * {@link ListBlobsOptions} * * @return Emits the successful response. * @throws UnsupportedOperationException If {@link ListBlobsOptions * set. * * @apiNote * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy "Sample code for ContainerAsyncClient.listBlobsHierarchySegment")] \n * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy_helper "helper code for ContainerAsyncClient.listBlobsHierarchySegment")] \n * For more samples, please see the [Samples file](%https: */ private Mono<ContainersListBlobHierarchySegmentResponse> listBlobsHierarchySegment(String marker, String delimiter, ListBlobsOptions options) { options = options == null ? new ListBlobsOptions() : options; if (options.details().snapshots()) { throw new UnsupportedOperationException("Including snapshots in a hierarchical listing is not supported."); } return postProcessResponse(this.azureBlobStorage.containers() .listBlobHierarchySegmentWithRestResponseAsync(null, delimiter, options.prefix(), marker, options.maxResults(), options.details().toList(), null, null, Context.NONE)); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedId A {@code String} in any valid GUID format. May be null. * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A * non-infinite lease can be between 15 and 60 seconds. * @return A reactive response containing the lease ID. 
*/ public Mono<Response<String>> acquireLease(String proposedId, int duration) { return this.acquireLease(proposedId, duration, null); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedID A {@code String} in any valid GUID format. May be null. * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A * non-infinite lease can be between 15 and 60 seconds. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the lease ID. * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions * ModifiedAccessConditions */ public Mono<Response<String>> acquireLease(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions) { if (!this.validateNoEtag(modifiedAccessConditions)) { throw new UnsupportedOperationException( "ETag access conditions are not supported for this API."); } return postProcessResponse(this.azureBlobStorage.containers().acquireLeaseWithRestResponseAsync( null, null, duration, proposedID, null, modifiedAccessConditions, Context.NONE)) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Renews the blob's previously-acquired lease. * * @param leaseID The leaseId of the active lease on the blob. * @return A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLease(String leaseID) { return this.renewLease(leaseID, null); } /** * Renews the blob's previously-acquired lease. * * @param leaseID The leaseId of the active lease on the blob. 
* @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the renewed lease ID. * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions * ModifiedAccessConditions */ public Mono<Response<String>> renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { if (!this.validateNoEtag(modifiedAccessConditions)) { throw new UnsupportedOperationException( "ETag access conditions are not supported for this API."); } return postProcessResponse(this.azureBlobStorage.containers().renewLeaseWithRestResponseAsync(null, leaseID, null, null, modifiedAccessConditions, Context.NONE)) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Releases the blob's previously-acquired lease. * * @param leaseID The leaseId of the active lease on the blob. * @return A reactive response signalling completion. */ public Mono<VoidResponse> releaseLease(String leaseID) { return this.releaseLease(leaseID, null); } /** * Releases the blob's previously-acquired lease. * * @param leaseID The leaseId of the active lease on the blob. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response signalling completion. 
* @throws UnsupportedOperationException If either {@link ModifiedAccessConditions * ModifiedAccessConditions */ public Mono<VoidResponse> releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { if (!this.validateNoEtag(modifiedAccessConditions)) { throw new UnsupportedOperationException( "ETag access conditions are not supported for this API."); } return postProcessResponse(this.azureBlobStorage.containers().releaseLeaseWithRestResponseAsync( null, leaseID, null, null, modifiedAccessConditions, Context.NONE)) .map(VoidResponse::new); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @return A reactive response containing the remaining time in the broken lease. */ public Mono<Response<Duration>> breakLease() { return this.breakLease(null, null); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @param breakPeriodInSeconds An optional {@code Integer} representing the proposed duration of seconds that the * lease should continue before it is broken, between 0 and 60 seconds. This break period is only used if it is * shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease * will not be available before the break period has expired, but the lease may be held for longer than the break * period. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the remaining time in the broken lease. 
* @throws UnsupportedOperationException If either {@link ModifiedAccessConditions * ModifiedAccessConditions */ public Mono<Response<Duration>> breakLease(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions) { if (!this.validateNoEtag(modifiedAccessConditions)) { throw new UnsupportedOperationException( "ETag access conditions are not supported for this API."); } return postProcessResponse(this.azureBlobStorage.containers().breakLeaseWithRestResponseAsync(null, null, breakPeriodInSeconds, null, modifiedAccessConditions, Context.NONE)) .map(rb -> new SimpleResponse<>(rb, Duration.ofSeconds(rb.deserializedHeaders().leaseTime()))); } /** * ChangeLease changes the blob's lease ID. * * @param leaseId The leaseId of the active lease on the blob. * @param proposedID A {@code String} in any valid GUID format. * @return A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLease(String leaseId, String proposedID) { return this.changeLease(leaseId, proposedID, null); } /** * ChangeLease changes the blob's lease ID. For more information, see the <a href="https: * Docs</a>. * * @param leaseId The leaseId of the active lease on the blob. * @param proposedID A {@code String} in any valid GUID format. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the new lease ID. 
* @throws UnsupportedOperationException If either {@link ModifiedAccessConditions * ModifiedAccessConditions */ public Mono<Response<String>> changeLease(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions) { if (!this.validateNoEtag(modifiedAccessConditions)) { throw new UnsupportedOperationException( "ETag access conditions are not supported for this API."); } return postProcessResponse(this.azureBlobStorage.containers().changeLeaseWithRestResponseAsync(null, leaseId, proposedID, null, null, modifiedAccessConditions, Context.NONE)) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Returns the sku name and account kind for the account. For more information, please see the * <a href="https: * * @return A reactive response containing the account info. */ public Mono<Response<StorageAccountInfo>> getAccountInfo() { return postProcessResponse( this.azureBlobStorage.containers().getAccountInfoWithRestResponseAsync(null, Context.NONE)) .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders()))); } private boolean validateNoEtag(ModifiedAccessConditions modifiedAccessConditions) { if (modifiedAccessConditions == null) { return true; } return modifiedAccessConditions.ifMatch() == null && modifiedAccessConditions.ifNoneMatch() == null; } /** * Generates a user delegation SAS with the specified parameters * * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS * @param accountName The {@code String} account name for the SAS * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @return A string that represents the SAS token */ public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, ContainerSASPermission permissions, OffsetDateTime expiryTime) { return this.generateUserDelegationSAS(userDelegationKey, 
accountName, permissions, expiryTime, null, null, null, null, null, null, null, null, null); } /** * Generates a user delegation SAS token with the specified parameters * * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS * @param accountName The {@code String} account name for the SAS * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS * @param ipRange An optional {@code IPRange} ip address range for the SAS * @return A string that represents the SAS token */ public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange) { return this.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, startTime, version, sasProtocol, ipRange, null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /* contentType */); } /** * Generates a user delegation SAS token with the specified parameters * * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS * @param accountName The {@code String} account name for the SAS * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS * @param ipRange An optional {@code IPRange} ip address range 
for the SAS * @param cacheControl An optional {@code String} cache-control header for the SAS. * @param contentDisposition An optional {@code String} content-disposition header for the SAS. * @param contentEncoding An optional {@code String} content-encoding header for the SAS. * @param contentLanguage An optional {@code String} content-language header for the SAS. * @param contentType An optional {@code String} content-type header for the SAS. * @return A string that represents the SAS token */ public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange, String cacheControl, String contentDisposition, String contentEncoding, String contentLanguage, String contentType) { ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol, startTime, expiryTime, permissions == null ? 
null : permissions.toString(), ipRange, null /* identifier*/, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); ServiceSASSignatureValues values = configureServiceSASSignatureValues(serviceSASSignatureValues, accountName); SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(userDelegationKey); return sasQueryParameters.encode(); } /** * Generates a SAS token with the specified parameters * * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @return A string that represents the SAS token */ public String generateSAS(ContainerSASPermission permissions, OffsetDateTime expiryTime) { return this.generateSAS(null, permissions, /* identifier */ expiryTime, null /* startTime */, null /* version */, null /* sasProtocol */, null /* ipRange */, null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /*contentType*/); } /** * Generates a SAS token with the specified parameters * * @param identifier The {@code String} name of the access policy on the container this SAS references if any * @return A string that represents the SAS token */ public String generateSAS(String identifier) { return this.generateSAS(identifier, null /* permissions*/, null /* expiryTime */, null /* startTime */, null /* version */, null /* sasProtocol */, null /* ipRange */, null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /*contentType*/); } /** * Generates a SAS token with the specified parameters * * @param identifier The {@code String} name of the access policy on the container this SAS references if any * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for 
the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS * @param ipRange An optional {@code IPRange} ip address range for the SAS * @return A string that represents the SAS token */ public String generateSAS(String identifier, ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange) { return this.generateSAS(identifier, permissions, expiryTime, startTime, version, sasProtocol, ipRange, null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /*contentType*/); } /** * Generates a SAS token with the specified parameters * * @param identifier The {@code String} name of the access policy on the container this SAS references if any * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS * @param ipRange An optional {@code IPRange} ip address range for the SAS * @param cacheControl An optional {@code String} cache-control header for the SAS. * @param contentDisposition An optional {@code String} content-disposition header for the SAS. * @param contentEncoding An optional {@code String} content-encoding header for the SAS. * @param contentLanguage An optional {@code String} content-language header for the SAS. * @param contentType An optional {@code String} content-type header for the SAS. 
* @return A string that represents the SAS token */ public String generateSAS(String identifier, ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange, String cacheControl, String contentDisposition, String contentEncoding, String contentLanguage, String contentType) { ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol, startTime, expiryTime, permissions == null ? null : permissions.toString(), ipRange, identifier, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); SharedKeyCredential sharedKeyCredential = Utility.getSharedKeyCredential(this.azureBlobStorage.getHttpPipeline()); Utility.assertNotNull("sharedKeyCredential", sharedKeyCredential); ServiceSASSignatureValues values = configureServiceSASSignatureValues(serviceSASSignatureValues, sharedKeyCredential.accountName()); SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(sharedKeyCredential); return sasQueryParameters.encode(); } /** * Sets serviceSASSignatureValues parameters dependent on the current blob type */ private ServiceSASSignatureValues configureServiceSASSignatureValues(ServiceSASSignatureValues serviceSASSignatureValues, String accountName) { serviceSASSignatureValues.canonicalName(this.azureBlobStorage.getUrl(), accountName); serviceSASSignatureValues.snapshotId(null); serviceSASSignatureValues.resource(Constants.UrlConstants.SAS_CONTAINER_CONSTANT); return serviceSASSignatureValues; } }
class ContainerAsyncClient {
    public static final String ROOT_CONTAINER_NAME = "$root";
    public static final String STATIC_WEBSITE_CONTAINER_NAME = "$web";
    public static final String LOG_CONTAINER_NAME = "$logs";

    private final ClientLogger logger = new ClientLogger(ContainerAsyncClient.class);
    private final AzureBlobStorageImpl azureBlobStorage;

    /**
     * Package-private constructor for use by {@link ContainerClientBuilder}.
     *
     * @param azureBlobStorage the API client for blob storage
     */
    ContainerAsyncClient(AzureBlobStorageImpl azureBlobStorage) {
        this.azureBlobStorage = azureBlobStorage;
    }

    /**
     * Creates a new {@link BlockBlobAsyncClient} object by concatenating the blobName to the end of
     * ContainerAsyncClient's URL. The new client shares this client's request policy pipeline.
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @return A new {@link BlockBlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public BlockBlobAsyncClient getBlockBlobAsyncClient(String blobName) {
        return getBlockBlobAsyncClient(blobName, null);
    }

    /**
     * Creates a new {@link BlockBlobAsyncClient} object by concatenating the blobName to the end of
     * ContainerAsyncClient's URL. The new client shares this client's request policy pipeline.
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @param snapshot the snapshot identifier for the blob.
     * @return A new {@link BlockBlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public BlockBlobAsyncClient getBlockBlobAsyncClient(String blobName, String snapshot) {
        return new BlockBlobAsyncClient(new AzureBlobStorageBuilder()
            .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString())
            .pipeline(azureBlobStorage.getHttpPipeline())
            .build(), snapshot);
    }

    /**
     * Creates a new {@link PageBlobAsyncClient} object by concatenating blobName to the end of
     * ContainerAsyncClient's URL. The new client shares this client's request policy pipeline.
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @return A new {@link PageBlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public PageBlobAsyncClient getPageBlobAsyncClient(String blobName) {
        return getPageBlobAsyncClient(blobName, null);
    }

    /**
     * Creates a new {@link PageBlobAsyncClient} object by concatenating blobName to the end of
     * ContainerAsyncClient's URL. The new client shares this client's request policy pipeline.
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @param snapshot the snapshot identifier for the blob.
     * @return A new {@link PageBlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public PageBlobAsyncClient getPageBlobAsyncClient(String blobName, String snapshot) {
        return new PageBlobAsyncClient(new AzureBlobStorageBuilder()
            .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString())
            .pipeline(azureBlobStorage.getHttpPipeline())
            .build(), snapshot);
    }

    /**
     * Creates a new {@link AppendBlobAsyncClient} object by concatenating blobName to the end of
     * ContainerAsyncClient's URL. The new client shares this client's request policy pipeline.
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @return A new {@link AppendBlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public AppendBlobAsyncClient getAppendBlobAsyncClient(String blobName) {
        return getAppendBlobAsyncClient(blobName, null);
    }

    /**
     * Creates a new {@link AppendBlobAsyncClient} object by concatenating blobName to the end of
     * ContainerAsyncClient's URL. The new client shares this client's request policy pipeline.
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @param snapshot the snapshot identifier for the blob.
     * @return A new {@link AppendBlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public AppendBlobAsyncClient getAppendBlobAsyncClient(String blobName, String snapshot) {
        return new AppendBlobAsyncClient(new AzureBlobStorageBuilder()
            .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString())
            .pipeline(azureBlobStorage.getHttpPipeline())
            .build(), snapshot);
    }

    /**
     * Creates a new {@link BlobAsyncClient} object by concatenating blobName to the end of ContainerAsyncClient's
     * URL. The new client shares this client's request policy pipeline.
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public BlobAsyncClient getBlobAsyncClient(String blobName) {
        return getBlobAsyncClient(blobName, null);
    }

    /**
     * Creates a new {@link BlobAsyncClient} object by concatenating blobName to the end of ContainerAsyncClient's
     * URL. The new client shares this client's request policy pipeline.
     *
     * @param blobName A {@code String} representing the name of the blob.
     * @param snapshot the snapshot identifier for the blob.
     * @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this
     * container.
     */
    public BlobAsyncClient getBlobAsyncClient(String blobName, String snapshot) {
        return new BlobAsyncClient(new AzureBlobStorageBuilder()
            .url(Utility.appendToURLPath(getContainerUrl(), blobName).toString())
            .pipeline(azureBlobStorage.getHttpPipeline())
            .build(), snapshot);
    }

    /**
     * Initializes a {@link BlobServiceAsyncClient} object pointing to the storage account this container is in.
     *
     * @return A {@link BlobServiceAsyncClient} object pointing to the specified storage account
     */
    public BlobServiceAsyncClient getBlobServiceAsyncClient() {
        return new BlobServiceAsyncClient(new AzureBlobStorageBuilder()
            .url(Utility.stripLastPathSegment(getContainerUrl()).toString())
            .pipeline(azureBlobStorage.getHttpPipeline())
            .build());
    }

    /**
     * Gets the URL of the container represented by this client.
     *
     * @return the URL.
     * @throws RuntimeException If the container has a malformed URL.
     */
    public URL getContainerUrl() {
        try {
            return new URL(azureBlobStorage.getUrl());
        } catch (MalformedURLException e) {
            // FIX: the original concatenated the class name onto the format string
            // ("Invalid URL on %s: %s" + getClass().getSimpleName()) and supplied only one argument for the
            // two %s specifiers, so String.format would throw MissingFormatArgumentException instead of
            // producing the intended message. Pass both format arguments explicitly.
            throw logger.logExceptionAsError(new RuntimeException(
                String.format("Invalid URL on %s: %s", getClass().getSimpleName(), azureBlobStorage.getUrl()), e));
        }
    }

    /**
     * Gets if the container this client represents exists in the cloud.
     *
     * @return true if the container exists, false if it doesn't
     */
    public Mono<Boolean> exists() {
        return existsWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Gets if the container this client represents exists in the cloud.
     *
     * @return true if the container exists, false if it doesn't
     */
    public Mono<Response<Boolean>> existsWithResponse() {
        return withContext(context -> existsWithResponse(context));
    }

    /**
     * Gets if the container this client represents exists in the cloud.
* * @return true if the container exists, false if it doesn't */ Mono<Response<Boolean>> existsWithResponse(Context context) { return this.getPropertiesWithResponse(null, context) .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true)) .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> { HttpResponse response = ((StorageException) t).response(); return Mono.just(new SimpleResponse<>(response.request(), response.statusCode(), response.headers(), false)); }); } /** * Creates a new container within a storage account. If a container with the same name already exists, the operation * fails. For more information, see the * <a href="https: * * @return A reactive response signalling completion. */ public Mono<Void> create() { return createWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * Creates a new container within a storage account. If a container with the same name already exists, the operation * fails. For more information, see the * <a href="https: * * @param metadata {@link Metadata} * @param accessType Specifies how the data in this container is available to the public. See the * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. * @return A reactive response signalling completion. */ public Mono<VoidResponse> createWithResponse(Metadata metadata, PublicAccessType accessType) { return withContext(context -> createWithResponse(metadata, accessType, context)); } Mono<VoidResponse> createWithResponse(Metadata metadata, PublicAccessType accessType, Context context) { metadata = metadata == null ? new Metadata() : metadata; return postProcessResponse(this.azureBlobStorage.containers().createWithRestResponseAsync( null, null, metadata, accessType, null, null, null, context)).map(VoidResponse::new); } /** * Marks the specified container for deletion. The container and any blobs contained within it are later deleted * during garbage collection. 
For more information, see the * <a href="https: * * @return A reactive response signalling completion. */ public Mono<Void> delete() { return deleteWithResponse(null).flatMap(FluxUtil::toMono); } /** * Marks the specified container for deletion. The container and any blobs contained within it are later deleted * during garbage collection. For more information, see the * <a href="https: * * @param accessConditions {@link ContainerAccessConditions} * @return A reactive response signalling completion. * @throws UnsupportedOperationException If {@link ContainerAccessConditions * {@link ModifiedAccessConditions */ public Mono<VoidResponse> deleteWithResponse(ContainerAccessConditions accessConditions) { return withContext(context -> deleteWithResponse(accessConditions, context)); } Mono<VoidResponse> deleteWithResponse(ContainerAccessConditions accessConditions, Context context) { accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions; if (!validateNoEtag(accessConditions.modifiedAccessConditions())) { throw logger.logExceptionAsError(new UnsupportedOperationException("ETag access conditions are not supported for this API.")); } return postProcessResponse(this.azureBlobStorage.containers().deleteWithRestResponseAsync(null, null, null, accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context)) .map(VoidResponse::new); } /** * Returns the container's metadata and system properties. For more information, see the * <a href="https: * * @return A {@link Mono} containing a {@link Response} whose {@link Response * container properties. */ public Mono<ContainerProperties> getProperties() { return getPropertiesWithResponse(null).flatMap(FluxUtil::toMono); } /** * Returns the container's metadata and system properties. 
For more information, see the * <a href="https: * * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does * not match the active lease on the blob. * @return A reactive response containing the container properties. */ public Mono<Response<ContainerProperties>> getPropertiesWithResponse(LeaseAccessConditions leaseAccessConditions) { return withContext(context -> getPropertiesWithResponse(leaseAccessConditions, context)); } Mono<Response<ContainerProperties>> getPropertiesWithResponse(LeaseAccessConditions leaseAccessConditions, Context context) { return postProcessResponse(this.azureBlobStorage.containers().getPropertiesWithRestResponseAsync(null, null, null, leaseAccessConditions, context)) .map(rb -> new SimpleResponse<>(rb, new ContainerProperties(rb.deserializedHeaders()))); } /** * Sets the container's metadata. For more information, see the * <a href="https: * * @param metadata {@link Metadata} * @return A {@link Mono} containing a {@link Response} whose {@link Response * completion. */ public Mono<Void> setMetadata(Metadata metadata) { return setMetadataWithResponse(metadata, null).flatMap(FluxUtil::toMono); } /** * Sets the container's metadata. For more information, see the * <a href="https: * * @param metadata {@link Metadata} * @param accessConditions {@link ContainerAccessConditions} * @return A reactive response signalling completion. * @throws UnsupportedOperationException If {@link ContainerAccessConditions * anything set other than {@link ModifiedAccessConditions */ public Mono<VoidResponse> setMetadataWithResponse(Metadata metadata, ContainerAccessConditions accessConditions) { return withContext(context -> setMetadataWithResponse(metadata, accessConditions, context)); } Mono<VoidResponse> setMetadataWithResponse(Metadata metadata, ContainerAccessConditions accessConditions, Context context) { metadata = metadata == null ? 
new Metadata() : metadata; accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions; if (!validateNoEtag(accessConditions.modifiedAccessConditions()) || accessConditions.modifiedAccessConditions().ifUnmodifiedSince() != null) { throw logger.logExceptionAsError(new UnsupportedOperationException( "If-Modified-Since is the only HTTP access condition supported for this API")); } return postProcessResponse(this.azureBlobStorage.containers().setMetadataWithRestResponseAsync(null, null, metadata, null, accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context)).map(VoidResponse::new); } /** * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly. * For more information, see the * <a href="https: * * @return A reactive response containing the container access policy. */ public Mono<ContainerAccessPolicies> getAccessPolicy() { return getAccessPolicyWithResponse(null).flatMap(FluxUtil::toMono); } /** * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly. * For more information, see the * <a href="https: * * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does * not match the active lease on the blob. * @return A reactive response containing the container access policy. 
*/
    public Mono<Response<ContainerAccessPolicies>> getAccessPolicyWithResponse(
        LeaseAccessConditions leaseAccessConditions) {
        return withContext(context -> getAccessPolicyWithResponse(leaseAccessConditions, context));
    }

    Mono<Response<ContainerAccessPolicies>> getAccessPolicyWithResponse(LeaseAccessConditions leaseAccessConditions,
        Context context) {
        return postProcessResponse(this.azureBlobStorage.containers().getAccessPolicyWithRestResponseAsync(null,
            null, null, leaseAccessConditions, context)
            .map(response -> new SimpleResponse<>(response, new ContainerAccessPolicies(
                response.deserializedHeaders().blobPublicAccess(), response.value()))));
    }

    /**
     * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed
     * publicly. Note that, for each signed identifier, the start and expiry times are truncated to the nearest
     * second to ensure the time formatting is compatible with the service. For more information, see the Azure
     * Docs.
     *
     * @param accessType Specifies how the data in this container is available to the public. See the
     * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
     * @param identifiers A list of {@link SignedIdentifier} objects that specify the permissions for the
     * container. Passing null will clear all access policies.
     * @return A reactive response signalling completion.
     */
    public Mono<Void> setAccessPolicy(PublicAccessType accessType, List<SignedIdentifier> identifiers) {
        return setAccessPolicyWithResponse(accessType, identifiers, null).flatMap(FluxUtil::toMono);
    }

    /**
     * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed
     * publicly. Note that, for each signed identifier, the start and expiry times are truncated to the nearest
     * second to ensure the time formatting is compatible with the service. For more information, see the Azure
     * Docs.
     *
     * @param accessType Specifies how the data in this container is available to the public. See the
     * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
     * @param identifiers A list of {@link SignedIdentifier} objects that specify the permissions for the
     * container. Passing null will clear all access policies.
     * @param accessConditions {@link ContainerAccessConditions}
     * @return A reactive response signalling completion.
     * @throws UnsupportedOperationException If the access conditions contain ETag conditions, which this API does
     * not support.
     */
    public Mono<VoidResponse> setAccessPolicyWithResponse(PublicAccessType accessType,
        List<SignedIdentifier> identifiers, ContainerAccessConditions accessConditions) {
        return withContext(context -> setAccessPolicyWithResponse(accessType, identifiers, accessConditions, context));
    }

    Mono<VoidResponse> setAccessPolicyWithResponse(PublicAccessType accessType, List<SignedIdentifier> identifiers,
        ContainerAccessConditions accessConditions, Context context) {
        accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions;

        if (!validateNoEtag(accessConditions.modifiedAccessConditions())) {
            throw logger.logExceptionAsError(new UnsupportedOperationException("ETag access conditions are not supported for this API."));
        }

        /*
        We truncate to seconds because the service only supports nanoseconds or seconds, but doing an
        OffsetDateTime.now will only give back milliseconds (more precise fields are zeroed and not serialized).
        This allows for proper serialization with no real detriment to users as sub-second precision on active
        time for signed identifiers is not really necessary.
        */
        if (identifiers != null) {
            for (SignedIdentifier identifier : identifiers) {
                if (identifier.accessPolicy() != null && identifier.accessPolicy().start() != null) {
                    identifier.accessPolicy().start(
                        identifier.accessPolicy().start().truncatedTo(ChronoUnit.SECONDS));
                }
                if (identifier.accessPolicy() != null && identifier.accessPolicy().expiry() != null) {
                    identifier.accessPolicy().expiry(
                        identifier.accessPolicy().expiry().truncatedTo(ChronoUnit.SECONDS));
                }
            }
        }

        return postProcessResponse(this.azureBlobStorage.containers().setAccessPolicyWithRestResponseAsync(null,
            identifiers, null, accessType, null, accessConditions.leaseAccessConditions(),
            accessConditions.modifiedAccessConditions(), context)).map(VoidResponse::new);
    }

    /**
     * Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories are
     * flattened and only actual blobs and no directories are returned.
     *
     * <p>
     * Blob names are returned in lexicographic order. E.g. listing a container containing a 'foo' folder, which
     * contains blobs 'foo1' and 'foo2', and a blob on the root level 'bar', will return
     *
     * <ul>
     * <li>foo/foo1
     * <li>foo/foo2
     * <li>bar
     * </ul>
     *
     * @return A reactive response emitting the flattened blobs.
     */
    public PagedFlux<BlobItem> listBlobsFlat() {
        return this.listBlobsFlat(new ListBlobsOptions());
    }

    /**
     * Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories are
     * flattened and only actual blobs and no directories are returned.
     *
     * <p>
     * Blob names are returned in lexicographic order.
     *
     * @param options {@link ListBlobsOptions}
     * @return A reactive response emitting the listed blobs, flattened.
*/ /* * Implementation for this paged listing operation, supporting an optional timeout provided by the synchronous * ContainerClient. Applies the given timeout to each Mono<ContainersListBlobFlatSegmentResponse> backing the * PagedFlux. * * @param options {@link ListBlobsOptions}. * @param timeout An optional timeout to be applied to the network asynchronous operations. * @return A reactive response emitting the listed blobs, flattened. */ PagedFlux<BlobItem> listBlobsFlatWithOptionalTimeout(ListBlobsOptions options, Duration timeout) { Function<String, Mono<PagedResponse<BlobItem>>> func = marker -> listBlobsFlatSegment(marker, options, timeout) .map(response -> { List<BlobItem> value = response.value().segment() == null ? new ArrayList<>(0) : response.value().segment().blobItems(); return new PagedResponseBase<>( response.request(), response.statusCode(), response.headers(), value, response.value().nextMarker(), response.deserializedHeaders()); }); return new PagedFlux<>( () -> func.apply(null), marker -> func.apply(marker)); } /* * Returns a single segment of blobs starting from the specified Marker. Use an empty * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned * Marker) to get the next segment. For more information, see the * <a href="https: * * @param marker * Identifies the portion of the list to be returned with the next list operation. * This value is returned in the response of a previous list operation as the * ListBlobsFlatSegmentResponse.body().nextMarker(). Set to null to list the first segment. * @param options * {@link ListBlobsOptions} * * @return Emits the successful response. 
* * @apiNote * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat "Sample code for ContainerAsyncClient.listBlobsFlatSegment")] \n * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat_helper "helper code for ContainerAsyncClient.listBlobsFlatSegment")] \n * For more samples, please see the [Samples file](%https: */ private Mono<ContainersListBlobFlatSegmentResponse> listBlobsFlatSegment(String marker, ListBlobsOptions options, Duration timeout) { options = options == null ? new ListBlobsOptions() : options; return postProcessResponse(Utility.applyOptionalTimeout( this.azureBlobStorage.containers().listBlobFlatSegmentWithRestResponseAsync(null, options.prefix(), marker, options.maxResults(), options.details().toList(), null, null, Context.NONE), timeout)); } /** * Returns a reactive Publisher emitting all the blobs and directories (prefixes) under the given directory * (prefix). Directories will have {@link BlobItem * * <p> * Blob names are returned in lexicographic order. For more information, see the * <a href="https: * * <p> * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the * root level 'bar', will return the following results when prefix=null: * * <ul> * <li>foo/ (isPrefix = true) * <li>bar (isPrefix = false) * </ul> * <p> * will return the following results when prefix="foo/": * * <ul> * <li>foo/foo1 (isPrefix = false) * <li>foo/foo2 (isPrefix = false) * </ul> * * @param directory The directory to list blobs underneath * @return A reactive response emitting the prefixes and blobs. */ public PagedFlux<BlobItem> listBlobsHierarchy(String directory) { return this.listBlobsHierarchy("/", new ListBlobsOptions().prefix(directory)); } /** * Returns a reactive Publisher emitting all the blobs and prefixes (directories) under the given prefix * (directory). 
Directories will have {@link BlobItem * * <p> * Blob names are returned in lexicographic order. For more information, see the * <a href="https: * * <p> * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the * root level 'bar', will return the following results when prefix=null: * * <ul> * <li>foo/ (isPrefix = true) * <li>bar (isPrefix = false) * </ul> * <p> * will return the following results when prefix="foo/": * * <ul> * <li>foo/foo1 (isPrefix = false) * <li>foo/foo2 (isPrefix = false) * </ul> * * @param delimiter The delimiter for blob hierarchy, "/" for hierarchy based on directories * @param options {@link ListBlobsOptions} * @return A reactive response emitting the prefixes and blobs. */ public PagedFlux<BlobItem> listBlobsHierarchy(String delimiter, ListBlobsOptions options) { return listBlobsHierarchyWithOptionalTimeout(delimiter, options, null); } /* * Implementation for this paged listing operation, supporting an optional timeout provided by the synchronous * ContainerClient. Applies the given timeout to each Mono<ContainersListBlobHierarchySegmentResponse> backing the * PagedFlux. * * @param delimiter The delimiter for blob hierarchy, "/" for hierarchy based on directories * @param options {@link ListBlobsOptions} * @param timeout An optional timeout to be applied to the network asynchronous operations. * @return A reactive response emitting the listed blobs, flattened. */ PagedFlux<BlobItem> listBlobsHierarchyWithOptionalTimeout(String delimiter, ListBlobsOptions options, Duration timeout) { Function<String, Mono<PagedResponse<BlobItem>>> func = marker -> listBlobsHierarchySegment(marker, delimiter, options, timeout) .map(response -> { List<BlobItem> value = response.value().segment() == null ? 
new ArrayList<>(0) : Stream.concat( response.value().segment().blobItems().stream(), response.value().segment().blobPrefixes().stream() .map(blobPrefix -> new BlobItem().name(blobPrefix.name()).isPrefix(true)) ).collect(Collectors.toList()); return new PagedResponseBase<>( response.request(), response.statusCode(), response.headers(), value, response.value().nextMarker(), response.deserializedHeaders()); }); return new PagedFlux<>( () -> func.apply(null), marker -> func.apply(marker)); } /* * Returns a single segment of blobs and blob prefixes starting from the specified Marker. Use an empty * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned * Marker) to get the next segment. For more information, see the * <a href="https: * * @param marker * Identifies the portion of the list to be returned with the next list operation. * This value is returned in the response of a previous list operation as the * ListBlobsHierarchySegmentResponse.body().nextMarker(). Set to null to list the first segment. * @param delimiter * The operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs * whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may * be a single character or a string. * @param options * {@link ListBlobsOptions} * * @return Emits the successful response. * @throws UnsupportedOperationException If {@link ListBlobsOptions * set. 
* * @apiNote * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy "Sample code for ContainerAsyncClient.listBlobsHierarchySegment")] \n * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy_helper "helper code for ContainerAsyncClient.listBlobsHierarchySegment")] \n * For more samples, please see the [Samples file](%https: */ private Mono<ContainersListBlobHierarchySegmentResponse> listBlobsHierarchySegment(String marker, String delimiter, ListBlobsOptions options, Duration timeout) { options = options == null ? new ListBlobsOptions() : options; if (options.details().snapshots()) { throw logger.logExceptionAsError(new UnsupportedOperationException("Including snapshots in a hierarchical listing is not supported.")); } return postProcessResponse(Utility.applyOptionalTimeout( this.azureBlobStorage.containers().listBlobHierarchySegmentWithRestResponseAsync(null, delimiter, options.prefix(), marker, options.maxResults(), options.details().toList(), null, null, Context.NONE), timeout)); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedId A {@code String} in any valid GUID format. May be null. * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A * non-infinite lease can be between 15 and 60 seconds. * @return A reactive response containing the lease ID. */ public Mono<String> acquireLease(String proposedId, int duration) { return acquireLeaseWithResponse(proposedId, duration, null).flatMap(FluxUtil::toMono); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedID A {@code String} in any valid GUID format. May be null. 
* @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A * non-infinite lease can be between 15 and 60 seconds. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the lease ID. * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions * ModifiedAccessConditions */ public Mono<Response<String>> acquireLeaseWithResponse(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions) { return withContext(context -> acquireLeaseWithResponse(proposedID, duration, modifiedAccessConditions, context)); } Mono<Response<String>> acquireLeaseWithResponse(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions, Context context) { if (!this.validateNoEtag(modifiedAccessConditions)) { throw logger.logExceptionAsError(new UnsupportedOperationException( "ETag access conditions are not supported for this API.")); } return postProcessResponse(this.azureBlobStorage.containers().acquireLeaseWithRestResponseAsync(null, null, duration, proposedID, null, modifiedAccessConditions, context)).map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Renews the blob's previously-acquired lease. * * @param leaseID The leaseId of the active lease on the blob. * @return A reactive response containing the renewed lease ID. */ public Mono<String> renewLease(String leaseID) { return renewLeaseWithResponse(leaseID, null).flatMap(FluxUtil::toMono); } /** * Renews the blob's previously-acquired lease. * * @param leaseID The leaseId of the active lease on the blob. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. 
ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the renewed lease ID. * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions * ModifiedAccessConditions */ public Mono<Response<String>> renewLeaseWithResponse(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return withContext(context -> renewLeaseWithResponse(leaseID, modifiedAccessConditions, context)); } Mono<Response<String>> renewLeaseWithResponse(String leaseID, ModifiedAccessConditions modifiedAccessConditions, Context context) { if (!this.validateNoEtag(modifiedAccessConditions)) { throw logger.logExceptionAsError(new UnsupportedOperationException( "ETag access conditions are not supported for this API.")); } return postProcessResponse(this.azureBlobStorage.containers().renewLeaseWithRestResponseAsync(null, leaseID, null, null, modifiedAccessConditions, context)).map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Releases the blob's previously-acquired lease. * * @param leaseID The leaseId of the active lease on the blob. * @return A reactive response signalling completion. */ public Mono<Void> releaseLease(String leaseID) { return releaseLeaseWithResponse(leaseID, null).flatMap(FluxUtil::toMono); } /** * Releases the blob's previously-acquired lease. * * @param leaseID The leaseId of the active lease on the blob. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response signalling completion. 
* @throws UnsupportedOperationException If either {@link ModifiedAccessConditions * ModifiedAccessConditions */ public Mono<VoidResponse> releaseLeaseWithResponse(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return withContext(context -> releaseLeaseWithResponse(leaseID, modifiedAccessConditions, context)); } Mono<VoidResponse> releaseLeaseWithResponse(String leaseID, ModifiedAccessConditions modifiedAccessConditions, Context context) { if (!this.validateNoEtag(modifiedAccessConditions)) { throw logger.logExceptionAsError(new UnsupportedOperationException( "ETag access conditions are not supported for this API.")); } return postProcessResponse(this.azureBlobStorage.containers().releaseLeaseWithRestResponseAsync( null, leaseID, null, null, modifiedAccessConditions, context)) .map(VoidResponse::new); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @return A reactive response containing the remaining time in the broken lease. */ public Mono<Duration> breakLease() { return breakLeaseWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @param breakPeriodInSeconds An optional {@code Integer} representing the proposed duration of seconds that the * lease should continue before it is broken, between 0 and 60 seconds. This break period is only used if it is * shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease * will not be available before the break period has expired, but the lease may be held for longer than the break * period. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. 
ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the remaining time in the broken lease. * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions * ModifiedAccessConditions */ public Mono<Response<Duration>> breakLeaseWithResponse(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions) { return withContext(context -> breakLeaseWithResponse(breakPeriodInSeconds, modifiedAccessConditions, context)); } Mono<Response<Duration>> breakLeaseWithResponse(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions, Context context) { if (!this.validateNoEtag(modifiedAccessConditions)) { throw logger.logExceptionAsError(new UnsupportedOperationException( "ETag access conditions are not supported for this API.")); } return postProcessResponse(this.azureBlobStorage.containers().breakLeaseWithRestResponseAsync(null, null, breakPeriodInSeconds, null, modifiedAccessConditions, context)) .map(rb -> new SimpleResponse<>(rb, Duration.ofSeconds(rb.deserializedHeaders().leaseTime()))); } /** * ChangeLease changes the blob's lease ID. * * @param leaseId The leaseId of the active lease on the blob. * @param proposedID A {@code String} in any valid GUID format. * @return A reactive response containing the new lease ID. */ public Mono<String> changeLease(String leaseId, String proposedID) { return changeLeaseWithResponse(leaseId, proposedID, null).flatMap(FluxUtil::toMono); } /** * ChangeLease changes the blob's lease ID. For more information, see the <a href="https: * Docs</a>. * * @param leaseId The leaseId of the active lease on the blob. * @param proposedID A {@code String} in any valid GUID format. * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. 
ETag and * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the new lease ID. * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions * ModifiedAccessConditions */ public Mono<Response<String>> changeLeaseWithResponse(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions) { return withContext(context -> changeLeaseWithResponse(leaseId, proposedID, modifiedAccessConditions, context)); } Mono<Response<String>> changeLeaseWithResponse(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions, Context context) { if (!this.validateNoEtag(modifiedAccessConditions)) { throw logger.logExceptionAsError(new UnsupportedOperationException( "ETag access conditions are not supported for this API.")); } return postProcessResponse(this.azureBlobStorage.containers().changeLeaseWithRestResponseAsync(null, leaseId, proposedID, null, null, modifiedAccessConditions, context)) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Returns the sku name and account kind for the account. For more information, please see the * <a href="https: * * @return A reactive response containing the account info. */ public Mono<StorageAccountInfo> getAccountInfo() { return getAccountInfoWithResponse().flatMap(FluxUtil::toMono); } /** * Returns the sku name and account kind for the account. For more information, please see the * <a href="https: * * @return A reactive response containing the account info. 
*/ public Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse() { return withContext(context -> getAccountInfoWithResponse(context)); } Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse(Context context) { return postProcessResponse( this.azureBlobStorage.containers().getAccountInfoWithRestResponseAsync(null, context)) .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders()))); } private boolean validateNoEtag(ModifiedAccessConditions modifiedAccessConditions) { if (modifiedAccessConditions == null) { return true; } return modifiedAccessConditions.ifMatch() == null && modifiedAccessConditions.ifNoneMatch() == null; } /** * Generates a user delegation SAS with the specified parameters * * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS * @param accountName The {@code String} account name for the SAS * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @return A string that represents the SAS token */ public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, ContainerSASPermission permissions, OffsetDateTime expiryTime) { return this.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, null, null, null, null, null, null, null, null, null); } /** * Generates a user delegation SAS token with the specified parameters * * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS * @param accountName The {@code String} account name for the SAS * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol 
for the SAS * @param ipRange An optional {@code IPRange} ip address range for the SAS * @return A string that represents the SAS token */ public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange) { return this.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, startTime, version, sasProtocol, ipRange, null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /* contentType */); } /** * Generates a user delegation SAS token with the specified parameters * * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS * @param accountName The {@code String} account name for the SAS * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS * @param ipRange An optional {@code IPRange} ip address range for the SAS * @param cacheControl An optional {@code String} cache-control header for the SAS. * @param contentDisposition An optional {@code String} content-disposition header for the SAS. * @param contentEncoding An optional {@code String} content-encoding header for the SAS. * @param contentLanguage An optional {@code String} content-language header for the SAS. * @param contentType An optional {@code String} content-type header for the SAS. 
* @return A string that represents the SAS token */ public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange, String cacheControl, String contentDisposition, String contentEncoding, String contentLanguage, String contentType) { ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol, startTime, expiryTime, permissions == null ? null : permissions.toString(), ipRange, null /* identifier*/, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); ServiceSASSignatureValues values = configureServiceSASSignatureValues(serviceSASSignatureValues, accountName); SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(userDelegationKey); return sasQueryParameters.encode(); } /** * Generates a SAS token with the specified parameters * * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @return A string that represents the SAS token */ public String generateSAS(ContainerSASPermission permissions, OffsetDateTime expiryTime) { return this.generateSAS(null, permissions, /* identifier */ expiryTime, null /* startTime */, null /* version */, null /* sasProtocol */, null /* ipRange */, null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /*contentType*/); } /** * Generates a SAS token with the specified parameters * * @param identifier The {@code String} name of the access policy on the container this SAS references if any * @return A string that represents the SAS token */ public String generateSAS(String identifier) { return this.generateSAS(identifier, null /* permissions*/, null /* expiryTime */, null /* startTime */, null /* version */, null 
/* sasProtocol */, null /* ipRange */, null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /*contentType*/); } /** * Generates a SAS token with the specified parameters * * @param identifier The {@code String} name of the access policy on the container this SAS references if any * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS * @param ipRange An optional {@code IPRange} ip address range for the SAS * @return A string that represents the SAS token */ public String generateSAS(String identifier, ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange) { return this.generateSAS(identifier, permissions, expiryTime, startTime, version, sasProtocol, ipRange, null /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /*contentType*/); } /** * Generates a SAS token with the specified parameters * * @param identifier The {@code String} name of the access policy on the container this SAS references if any * @param permissions The {@code ContainerSASPermissions} permission for the SAS * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS * @param startTime An optional {@code OffsetDateTime} start time for the SAS * @param version An optional {@code String} version for the SAS * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS * @param ipRange An optional {@code IPRange} ip address range for the SAS * @param cacheControl An optional {@code String} cache-control header for the SAS. 
* @param contentDisposition An optional {@code String} content-disposition header for the SAS. * @param contentEncoding An optional {@code String} content-encoding header for the SAS. * @param contentLanguage An optional {@code String} content-language header for the SAS. * @param contentType An optional {@code String} content-type header for the SAS. * @return A string that represents the SAS token */ public String generateSAS(String identifier, ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange, String cacheControl, String contentDisposition, String contentEncoding, String contentLanguage, String contentType) { ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol, startTime, expiryTime, permissions == null ? null : permissions.toString(), ipRange, identifier, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); SharedKeyCredential sharedKeyCredential = Utility.getSharedKeyCredential(this.azureBlobStorage.getHttpPipeline()); Utility.assertNotNull("sharedKeyCredential", sharedKeyCredential); ServiceSASSignatureValues values = configureServiceSASSignatureValues(serviceSASSignatureValues, sharedKeyCredential.accountName()); SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(sharedKeyCredential); return sasQueryParameters.encode(); } /** * Sets serviceSASSignatureValues parameters dependent on the current blob type */ private ServiceSASSignatureValues configureServiceSASSignatureValues(ServiceSASSignatureValues serviceSASSignatureValues, String accountName) { serviceSASSignatureValues.canonicalName(this.azureBlobStorage.getUrl(), accountName); serviceSASSignatureValues.snapshotId(null); serviceSASSignatureValues.resource(Constants.UrlConstants.SAS_CONTAINER_CONSTANT); return serviceSASSignatureValues; } }
nit: ownership that have not `been` modified
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) { return Mono.fromRunnable(() -> { logger.info("Starting load balancer"); Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1(); List<String> partitionIds = tuple.getT2(); if (ImplUtils.isNullOrEmpty(partitionIds)) { throw Exceptions .propagate(new IllegalStateException("There are no partitions in Event Hub " + this.eventHubName)); } int numberOfPartitions = partitionIds.size(); logger.info("Partition manager returned {} ownership records", partitionOwnershipMap.size()); logger.info("EventHubAsyncClient returned {} partitions", numberOfPartitions); if (!isValid(partitionOwnershipMap)) { throw Exceptions .propagate(new IllegalStateException("Invalid partitionOwnership data from PartitionManager")); } /* * Remove all partitions' ownership that have not be modified for a configuration period of time. This means * that the previous EventProcessor that owned the partition is probably down and the partition is now eligible * to be claimed by other EventProcessors. */ Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships( partitionOwnershipMap); logger.info("Number of active ownership records {}", activePartitionOwnershipMap.size()); if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) { /* * If the active partition ownership map is empty, this is the first time an event processor is * running or all Event Processors are down for this Event Hub, consumer group combination. All * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership. 
*/ claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(numberOfPartitions))); return; } /* * Create a map of owner id and a list of partitions it owns */ Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values() .stream() .collect( Collectors.groupingBy(PartitionOwnership::ownerId, mapping(Function.identity(), toList()))); ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>()); /* * Find the minimum number of partitions every event processor should own when the load is * evenly distributed. */ int numberOfActiveEventProcessors = ownerPartitionMap.size(); logger.info("Number of active event processors {}", ownerPartitionMap.size()); int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors; /* * If the number of partitions in Event Hub is not evenly divisible by number of active event processors, * a few Event Processors may own 1 additional partition than the minimum when the load is balanced. Calculate * the number of event processors that can own additional partition. 
*/ int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors; logger.info("Expected min partitions per event processor = {}, expected number of event " + "processors with additional partition = {}", minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition); if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition, ownerPartitionMap)) { logger.info("Load is balanced"); return; } if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) { logger.info("This event processor owns {} partitions and shouldn't own more", ownerPartitionMap.get(ownerId)); return; } logger.info( "Load is unbalanced and this event processor should own more partitions"); /* * If some partitions are unclaimed, this could be because an event processor is down and * it's partitions are now available for others to own or because event processors are just * starting up and gradually claiming partitions to own or new partitions were added to Event Hub. * Find any partition that is not actively owned and claim it. * * OR * * Find a partition to steal from another event processor. Pick the event processor that has owns the highest * number of partitions. */ String partitionToClaim = partitionIds.parallelStream() .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId)) .findAny() .orElseGet(() -> { logger.info("No unclaimed partitions, stealing from another event processor"); return findPartitionToSteal(ownerPartitionMap); }); claimOwnership(partitionOwnershipMap, partitionToClaim); }); }
* Remove all partitions' ownership that have not be modified for a configuration period of time. This means
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) { return Mono.fromRunnable(() -> { logger.info("Starting load balancer"); Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1(); List<String> partitionIds = tuple.getT2(); if (ImplUtils.isNullOrEmpty(partitionIds)) { throw Exceptions .propagate(new IllegalStateException("There are no partitions in Event Hub " + this.eventHubName)); } int numberOfPartitions = partitionIds.size(); logger.info("Partition manager returned {} ownership records", partitionOwnershipMap.size()); logger.info("EventHubAsyncClient returned {} partitions", numberOfPartitions); if (!isValid(partitionOwnershipMap)) { throw Exceptions .propagate(new IllegalStateException("Invalid partitionOwnership data from PartitionManager")); } /* * Remove all partitions' ownership that have not been modified for a configuration period of time. This * means * that the previous EventProcessor that owned the partition is probably down and the partition is now eligible * to be claimed by other EventProcessors. */ Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships( partitionOwnershipMap); logger.info("Number of active ownership records {}", activePartitionOwnershipMap.size()); if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) { /* * If the active partition ownership map is empty, this is the first time an event processor is * running or all Event Processors are down for this Event Hub, consumer group combination. All * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership. 
*/ claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(numberOfPartitions))); return; } /* * Create a map of owner id and a list of partitions it owns */ Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values() .stream() .collect( Collectors.groupingBy(PartitionOwnership::ownerId, mapping(Function.identity(), toList()))); ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>()); /* * Find the minimum number of partitions every event processor should own when the load is * evenly distributed. */ int numberOfActiveEventProcessors = ownerPartitionMap.size(); logger.info("Number of active event processors {}", ownerPartitionMap.size()); int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors; /* * If the number of partitions in Event Hub is not evenly divisible by number of active event processors, * a few Event Processors may own 1 additional partition than the minimum when the load is balanced. Calculate * the number of event processors that can own additional partition. 
*/ int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors; logger.info("Expected min partitions per event processor = {}, expected number of event " + "processors with additional partition = {}", minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition); if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition, ownerPartitionMap)) { logger.info("Load is balanced"); return; } if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) { logger.info("This event processor owns {} partitions and shouldn't own more", ownerPartitionMap.get(ownerId).size()); return; } logger.info( "Load is unbalanced and this event processor should own more partitions"); /* * If some partitions are unclaimed, this could be because an event processor is down and * it's partitions are now available for others to own or because event processors are just * starting up and gradually claiming partitions to own or new partitions were added to Event Hub. * Find any partition that is not actively owned and claim it. * * OR * * Find a partition to steal from another event processor. Pick the event processor that has owns the highest * number of partitions. */ String partitionToClaim = partitionIds.parallelStream() .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId)) .findAny() .orElseGet(() -> { logger.info("No unclaimed partitions, stealing from another event processor"); return findPartitionToSteal(ownerPartitionMap); }); claimOwnership(partitionOwnershipMap, partitionToClaim); }); }
class PartitionBasedLoadBalancer { private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class); private final String eventHubName; private final String consumerGroupName; private final PartitionManager partitionManager; private final EventHubAsyncClient eventHubAsyncClient; private final String ownerId; private final long inactiveTimeLimitInSeconds; private final PartitionPumpManager partitionPumpManager; /** * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group. * * @param partitionManager The partition manager that this load balancer will use to read/update ownership details. * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events. * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with. * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with. * @param ownerId The identifier of the {@link EventProcessor} that owns this load balancer. * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before * assuming the owner of the partition is inactive. * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions * that this {@link EventProcessor} is processing. 
*/ public PartitionBasedLoadBalancer(final PartitionManager partitionManager, final EventHubAsyncClient eventHubAsyncClient, final String eventHubName, final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager) { this.partitionManager = partitionManager; this.eventHubAsyncClient = eventHubAsyncClient; this.eventHubName = eventHubName; this.consumerGroupName = consumerGroupName; this.ownerId = ownerId; this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds; this.partitionPumpManager = partitionPumpManager; } /** * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning <b>at * most one</b> new partition. * <p> * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active * EventProcessor.Given that each invocation to this method results in ownership claim of at most one partition, * this algorithm converges gradually towards a steady state. * </p> * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an * {@link EventHubAsyncConsumer} for processing events from that partition. */ public void loadBalance() { /* * Retrieve current partition ownership details from the datastore. */ final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager .listOwnership(eventHubName, consumerGroupName) .timeout(Duration.ofSeconds(1)) .collectMap(PartitionOwnership::partitionId, Function.identity()); /* * Retrieve the list of partition ids from the Event Hub. 
*/ final Mono<List<String>> partitionsMono = eventHubAsyncClient .getPartitionIds() .timeout(Duration.ofSeconds(1)) .collectList(); Mono.zip(partitionOwnershipMono, partitionsMono) .flatMap(this::loadBalance) .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage())) .subscribe(); } /* * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the * current Event Processor should take on the responsibility of processing more partitions. */ /* * Check if partition ownership data is valid before proceeding with load balancing. */ private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap.values() .stream() .noneMatch(partitionOwnership -> { return partitionOwnership.eventHubName() == null || !partitionOwnership.eventHubName().equals(this.eventHubName) || partitionOwnership.consumerGroupName() == null || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName) || partitionOwnership.partitionId() == null || partitionOwnership.lastModifiedTime() == null || partitionOwnership.eTag() == null; }); } /* * Find the event processor that owns the maximum number of partitions and steal a random partition * from it. 
*/ private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) { Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet() .stream() .max(Comparator.comparingInt(entry -> entry.getValue().size())) .get(); int numberOfPartitions = ownerWithMaxPartitions.getValue().size(); logger.info("Owner id {} owns {} partitions, stealing a partition from it", ownerWithMaxPartitions.getKey(), numberOfPartitions); return ownerWithMaxPartitions.getValue().get(RANDOM.nextInt(numberOfPartitions)).partitionId(); } /* * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor} * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional * partition. */ private boolean isLoadBalanced(final int minPartitionsPerEventProcessor, final int numberOfEventProcessorsWithAdditionalPartition, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int count = 0; for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) { int numberOfPartitions = partitionOwnership.size(); if (numberOfPartitions < minPartitionsPerEventProcessor || numberOfPartitions > minPartitionsPerEventProcessor + 1) { return false; } if (numberOfPartitions == minPartitionsPerEventProcessor + 1) { count++; } } return count == numberOfEventProcessorsWithAdditionalPartition; } /* * This method is called after determining that the load is not balanced. This method will evaluate * if the current event processor should own more partitions. Specifically, this method returns true if the * current event processor owns less than the minimum number of partitions or if it owns the minimum number * and no other event processor owns lesser number of partitions than this event processor. 
*/ private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size(); int leastPartitionsOwnedByAnyEventProcessor = ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size(); return numberOfPartitionsOwned < minPartitionsPerEventProcessor || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor; } /* * This method will create a new map of partition id and PartitionOwnership containing only those partitions * that are actively owned. All entries in the original map returned by PartitionManager that haven't been * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by * dead event processors. These will not be included in the map returned by this method. */ private Map<String, PartitionOwnership> removeInactivePartitionOwnerships( final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap .entrySet() .stream() .filter(entry -> { return (System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS .toMillis(inactiveTimeLimitInSeconds)) && !ImplUtils.isNullOrEmpty(entry.getValue().ownerId()); }).collect(Collectors.toMap(Entry::getKey, Entry::getValue)); } private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { logger.info("Attempting to claim ownership of partition {}", partitionIdToClaim); PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap, partitionIdToClaim); partitionManager .claimOwnership(ownershipRequest) .timeout(Duration.ofSeconds(1)) .doOnNext(partitionOwnership -> logger.info("Successfully claimed ownership of partition {}", partitionOwnership.partitionId())) .doOnError(ex -> logger .warning("Failed to claim ownership of partition {} - {}", 
ownershipRequest.partitionId(), ex.getMessage(), ex)) .subscribe(partitionPumpManager::startPartitionPump); } private PartitionOwnership createPartitionOwnershipRequest( final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim); PartitionOwnership partitionOwnershipRequest = new PartitionOwnership() .ownerId(this.ownerId) .partitionId(partitionIdToClaim) .consumerGroupName(this.consumerGroupName) .eventHubName(this.eventHubName) .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber()) .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset()) .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag()) .ownerLevel(0L); return partitionOwnershipRequest; } }
/**
 * Load balancer that distributes Event Hub partition ownership among competing {@link EventProcessor}
 * instances. Each call to {@link #loadBalance()} claims at most one additional partition for this
 * instance, so ownership converges gradually towards an even distribution.
 */
class PartitionBasedLoadBalancer {

    // Shared RNG used only to pick a random partition to steal from the most-loaded owner.
    private static final Random RANDOM = new Random();
    private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class);
    private final String eventHubName;
    private final String consumerGroupName;
    private final PartitionManager partitionManager;
    private final EventHubAsyncClient eventHubAsyncClient;
    private final String ownerId;
    // Ownership records not modified for longer than this are treated as abandoned by a dead processor.
    private final long inactiveTimeLimitInSeconds;
    private final PartitionPumpManager partitionPumpManager;

    /**
     * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group.
     *
     * @param partitionManager The partition manager that this load balancer will use to read/update ownership details.
     * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events.
     * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with.
     * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with.
     * @param ownerId The identifier of the {@link EventProcessor} that owns this load balancer.
     * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before
     * assuming the owner of the partition is inactive.
     * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions
     * that this {@link EventProcessor} is processing.
     */
    public PartitionBasedLoadBalancer(final PartitionManager partitionManager,
        final EventHubAsyncClient eventHubAsyncClient, final String eventHubName,
        final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds,
        final PartitionPumpManager partitionPumpManager) {
        this.partitionManager = partitionManager;
        this.eventHubAsyncClient = eventHubAsyncClient;
        this.eventHubName = eventHubName;
        this.consumerGroupName = consumerGroupName;
        this.ownerId = ownerId;
        this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds;
        this.partitionPumpManager = partitionPumpManager;
    }

    /**
     * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link
     * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning <b>at
     * most one</b> new partition.
     * <p>
     * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active
     * EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition,
     * this algorithm converges gradually towards a steady state.
     * </p>
     * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an
     * {@link EventHubAsyncConsumer} for processing events from that partition.
     */
    public void loadBalance() {
        /*
         * Retrieve current partition ownership details from the datastore.
         */
        final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager
            .listOwnership(eventHubName, consumerGroupName)
            .timeout(Duration.ofSeconds(1))
            .collectMap(PartitionOwnership::partitionId, Function.identity());

        /*
         * Retrieve the list of partition ids from the Event Hub.
         */
        final Mono<List<String>> partitionsMono = eventHubAsyncClient
            .getPartitionIds()
            .timeout(Duration.ofSeconds(1))
            .collectList();

        // NOTE(review): this::loadBalance refers to a private loadBalance(Tuple2) overload that is not
        // visible in this fragment — confirm it exists in the full source file.
        Mono.zip(partitionOwnershipMono, partitionsMono)
            .flatMap(this::loadBalance)
            .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage()))
            .subscribe();
    }

    /*
     * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the
     * current Event Processor should take on the responsibility of processing more partitions.
     */

    /*
     * Check if partition ownership data is valid before proceeding with load balancing. Returns true only when
     * every record matches this Event Hub/consumer group and carries a partition id, last-modified time and eTag.
     */
    private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) {
        return partitionOwnershipMap.values()
            .stream()
            .noneMatch(partitionOwnership -> {
                return partitionOwnership.eventHubName() == null
                    || !partitionOwnership.eventHubName().equals(this.eventHubName)
                    || partitionOwnership.consumerGroupName() == null
                    || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName)
                    || partitionOwnership.partitionId() == null
                    || partitionOwnership.lastModifiedTime() == null
                    || partitionOwnership.eTag() == null;
            });
    }

    /*
     * Find the event processor that owns the maximum number of partitions and steal a random partition
     * from it.
     */
    private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        // Safe to call get() here: callers only reach this method when the map is non-empty.
        Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet()
            .stream()
            .max(Comparator.comparingInt(entry -> entry.getValue().size()))
            .get();
        int numberOfPartitions = ownerWithMaxPartitions.getValue().size();
        logger.info("Owner id {} owns {} partitions, stealing a partition from it",
            ownerWithMaxPartitions.getKey(), numberOfPartitions);
        return ownerWithMaxPartitions.getValue().get(RANDOM.nextInt(numberOfPartitions)).partitionId();
    }

    /*
     * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor}
     * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional
     * partition.
     */
    private boolean isLoadBalanced(final int minPartitionsPerEventProcessor,
        final int numberOfEventProcessorsWithAdditionalPartition,
        final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        int count = 0;
        for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) {
            int numberOfPartitions = partitionOwnership.size();
            // Any owner below the minimum, or more than one above it, means the load is unbalanced.
            if (numberOfPartitions < minPartitionsPerEventProcessor
                || numberOfPartitions > minPartitionsPerEventProcessor + 1) {
                return false;
            }
            if (numberOfPartitions == minPartitionsPerEventProcessor + 1) {
                count++;
            }
        }
        return count == numberOfEventProcessorsWithAdditionalPartition;
    }

    /*
     * This method is called after determining that the load is not balanced. This method will evaluate
     * if the current event processor should own more partitions. Specifically, this method returns true if the
     * current event processor owns less than the minimum number of partitions or if it owns the minimum number
     * and no other event processor owns lesser number of partitions than this event processor.
     */
    private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor,
        final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size();
        int leastPartitionsOwnedByAnyEventProcessor =
            ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size();
        return numberOfPartitionsOwned < minPartitionsPerEventProcessor
            || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor;
    }

    /*
     * This method will create a new map of partition id and PartitionOwnership containing only those partitions
     * that are actively owned. All entries in the original map returned by PartitionManager that haven't been
     * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by
     * dead event processors. These will not be included in the map returned by this method.
     */
    private Map<String, PartitionOwnership> removeInactivePartitionOwnerships(
        final Map<String, PartitionOwnership> partitionOwnershipMap) {
        return partitionOwnershipMap
            .entrySet()
            .stream()
            .filter(entry -> {
                // Keep only records that were touched recently AND still name an owner.
                return (System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS
                    .toMillis(inactiveTimeLimitInSeconds)) && !ImplUtils.isNullOrEmpty(entry.getValue().ownerId());
            }).collect(Collectors.toMap(Entry::getKey, Entry::getValue));
    }

    /*
     * Attempts to claim ownership of the given partition through the PartitionManager; on success the
     * partition pump manager starts pumping events from that partition.
     */
    private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap,
        final String partitionIdToClaim) {
        logger.info("Attempting to claim ownership of partition {}", partitionIdToClaim);
        PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap,
            partitionIdToClaim);
        partitionManager
            .claimOwnership(ownershipRequest)
            .timeout(Duration.ofSeconds(1))
            .doOnNext(partitionOwnership -> logger.info("Successfully claimed ownership of partition {}",
                partitionOwnership.partitionId()))
            .doOnError(ex -> logger
                .warning("Failed to claim ownership of partition {} - {}",
                    ownershipRequest.partitionId(), ex.getMessage(), ex))
            .subscribe(partitionPumpManager::startPartitionPump);
    }

    /*
     * Builds the ownership record to submit, carrying forward the previous owner's sequence number,
     * offset and eTag when a prior record exists.
     */
    private PartitionOwnership createPartitionOwnershipRequest(
        final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) {
        PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim);
        PartitionOwnership partitionOwnershipRequest = new PartitionOwnership()
            .ownerId(this.ownerId)
            .partitionId(partitionIdToClaim)
            .consumerGroupName(this.consumerGroupName)
            .eventHubName(this.eventHubName)
            .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber())
            .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset())
            .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag())
            .ownerLevel(0L);
        return partitionOwnershipRequest;
    }
}
```suggestion "'connectionString' contains an Event Hub name [%s]. Please use the" ```
/**
 * Sets the credential information given a connection string to the Event Hubs namespace and the name of a
 * specific Event Hub instance.
 *
 * @param connectionString The connection string to use for connecting to the Event Hubs namespace; it is
 *     expected that the shared access key properties are contained in this connection string, but not the
 *     Event Hub name.
 * @param eventHubName The name of the Event Hub to connect the client to.
 * @return The updated {@link EventHubClientBuilder} object.
 * @throws IllegalArgumentException if {@code eventHubName} is null or empty, or if the
 *     {@code connectionString} already carries an Event Hub name in its "EntityPath" component.
 * @throws AzureException If the shared access signature token credential could not be created using the
 *     connection string.
 */
public EventHubClientBuilder connectionString(String connectionString, String eventHubName) {
    if (ImplUtils.isNullOrEmpty(eventHubName)) {
        throw new IllegalArgumentException("'eventHubName' cannot be null or empty");
    }
    final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
    final TokenCredential tokenCredential;
    try {
        tokenCredential = new EventHubSharedAccessKeyCredential(properties.sharedAccessKeyName(),
            properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY);
    } catch (InvalidKeyException | NoSuchAlgorithmException e) {
        throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e);
    }
    if (!ImplUtils.isNullOrEmpty(properties.eventHubName())) {
        // FIX: the message previously said "Event Hub path", but the value interpolated here is
        // properties.eventHubName() — the EntityPath component IS the Event Hub name.
        throw new IllegalArgumentException(String.format(Locale.US,
            "'connectionString' contains an Event Hub name [%s]. Please use the"
                + " credentials(String connectionString) overload. Or supply a 'connectionString' without"
                + " 'EntityPath' in it.", properties.eventHubName()));
    }
    return credential(properties.endpoint().getHost(), eventHubName, tokenCredential);
}
"'connectionString' contains an Event Hub path [%s]. Please use the"
/**
 * Configures this builder from a namespace-level connection string plus an explicitly supplied Event Hub
 * name. The connection string must carry the shared access key properties but must not itself name an
 * Event Hub via "EntityPath" — that would make the two sources of the name ambiguous.
 *
 * @param connectionString Connection string for the Event Hubs namespace (shared access key expected).
 * @param eventHubName Name of the Event Hub this client should connect to.
 * @return The updated {@link EventHubClientBuilder} object.
 * @throws IllegalArgumentException if {@code eventHubName} is null/empty, or the connection string
 *     contains an "EntityPath" entry.
 * @throws AzureException If the shared access signature token credential could not be created.
 */
public EventHubClientBuilder connectionString(String connectionString, String eventHubName) {
    if (ImplUtils.isNullOrEmpty(eventHubName)) {
        throw new IllegalArgumentException("'eventHubName' cannot be null or empty");
    }

    final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);

    // Build the SAS credential first so key problems surface as AzureException before any name checks.
    final TokenCredential sharedKeyCredential;
    try {
        sharedKeyCredential = new EventHubSharedAccessKeyCredential(
            properties.sharedAccessKeyName(), properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY);
    } catch (InvalidKeyException | NoSuchAlgorithmException e) {
        throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e);
    }

    // An EntityPath in the connection string would conflict with the eventHubName argument.
    if (!ImplUtils.isNullOrEmpty(properties.eventHubName())) {
        throw new IllegalArgumentException(String.format(Locale.US,
            "'connectionString' contains an Event Hub name [%s]. Please use the"
                + " credentials(String connectionString) overload. Or supply a 'connectionString' without"
                + " 'EntityPath' in it.", properties.eventHubName()));
    }

    return credential(properties.endpoint().getHost(), eventHubName, sharedKeyCredential);
}
/**
 * Fluent builder that collects connection, credential, transport, retry, proxy and scheduler options and
 * produces {@link EventHubAsyncClient} or {@link EventProcessorAsyncClient} instances.
 */
class EventHubClientBuilder {

    // Environment/configuration key consulted when no credentials were set explicitly.
    private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING";
    private static final RetryOptions DEFAULT_RETRY = new RetryOptions()
        .tryTimeout(ClientConstants.OPERATION_TIMEOUT);

    private TokenCredential credentials;
    private Configuration configuration;
    private ProxyConfiguration proxyConfiguration;
    private RetryOptions retryOptions;
    private Scheduler scheduler;
    private TransportType transport;
    private String host;
    private String eventHubName;
    private EventPosition initialEventPosition;
    private PartitionProcessorFactory partitionProcessorFactory;
    private String consumerGroupName;
    private PartitionManager partitionManager;

    /**
     * Creates a new instance with the default transport {@link TransportType#AMQP}.
     */
    public EventHubClientBuilder() {
        transport = TransportType.AMQP;
    }

    /**
     * Sets the credential information given a connection string to the Event Hub instance.
     *
     * <p>
     * If the connection string is copied from the Event Hubs namespace, it will likely not contain the name of the
     * desired Event Hub, which is needed. In this case, the name can be added manually by adding {@literal
     * "EntityPath=EVENT_HUB_NAME"} to the end of the connection string. For example, "EntityPath=telemetry-hub".
     * </p>
     *
     * <p>
     * If you have defined a shared access policy directly on the Event Hub itself, then copying the connection string
     * from that Event Hub will result in a connection string that contains the name.
     * </p>
     *
     * @param connectionString The connection string to use for connecting to the Event Hub instance. It is
     * expected that the Event Hub name and the shared access key properties are contained in this connection
     * string.
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws IllegalArgumentException if {@code connectionString} is null or empty. Or, the {@code
     * connectionString} does not contain the "EntityPath" key, which is the name of the Event Hub instance.
     * @throws AzureException If the shared access signature token credential could not be created using the
     * connection string.
     */
    public EventHubClientBuilder connectionString(String connectionString) {
        final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
        final TokenCredential tokenCredential;
        try {
            tokenCredential = new EventHubSharedAccessKeyCredential(properties.sharedAccessKeyName(),
                properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY);
        } catch (InvalidKeyException | NoSuchAlgorithmException e) {
            throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e);
        }
        return credential(properties.endpoint().getHost(), properties.eventHubName(), tokenCredential);
    }

    /**
     * Sets the credential information given a connection string to the Event Hubs namespace and the name of a
     * specific Event Hub instance.
     *
     * NOTE(review): the method body belonging to this Javadoc — connectionString(String, String) — is not
     * present in this fragment; confirm it exists in the full source file.
     *
     * @param connectionString The connection string to use for connecting to the Event Hubs namespace; it is
     * expected that the shared access key properties are contained in this connection string, but not the Event
     * Hub name.
     * @param eventHubName The name of the Event Hub to connect the client to.
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws IllegalArgumentException if {@code connectionString} or {@code eventHubName} is null or empty.
     * Or, if the {@code connectionString} contains the Event Hub name.
     * @throws AzureException If the shared access signature token credential could not be created using the
     * connection string.
     */

    /**
     * Sets the configuration store that is used during construction of the service client.
     *
     * If not specified, the default configuration store is used to configure the {@link EventHubAsyncClient}.
     *
     * @param configuration The configuration store used to configure the {@link EventHubAsyncClient}.
     * @return The updated {@link EventHubClientBuilder} object.
     */
    public EventHubClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it.
     *
     * @param host The fully qualified host name for the Event Hubs namespace. This is likely to be similar to
     * {@literal "{your-namespace}.servicebus.windows.net}".
     * @param eventHubName The name of the Event Hub to connect the client to.
     * @param credential The token credential to use for authorization. Access controls may be specified by the
     * Event Hubs namespace or the requested Event Hub, depending on Azure configuration.
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws IllegalArgumentException if {@code host} or {@code eventHubName} is null or empty.
     * @throws NullPointerException if {@code credentials} is null.
     */
    public EventHubClientBuilder credential(String host, String eventHubName, TokenCredential credential) {
        if (ImplUtils.isNullOrEmpty(host)) {
            throw new IllegalArgumentException("'host' cannot be null or empty");
        }
        if (ImplUtils.isNullOrEmpty(eventHubName)) {
            throw new IllegalArgumentException("'eventHubName' cannot be null or empty.");
        }
        Objects.requireNonNull(credential);
        this.host = host;
        this.credentials = credential;
        this.eventHubName = eventHubName;
        return this;
    }

    /**
     * Sets the proxy configuration to use for {@link EventHubAsyncClient}. When a proxy is configured,
     * {@link TransportType#AMQP_WEB_SOCKETS} must be used (buildAsyncClient rejects any other transport
     * when a proxy address is configured).
     *
     * @param proxyConfiguration The proxy configuration to use.
     * @return The updated {@link EventHubClientBuilder} object.
     */
    public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) {
        this.proxyConfiguration = proxyConfiguration;
        return this;
    }

    /**
     * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is
     * specified, an elastic pool is used.
     *
     * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to
     * Event Hubs.
     * @return The updated {@link EventHubClientBuilder} object.
     */
    public EventHubClientBuilder scheduler(Scheduler scheduler) {
        this.scheduler = scheduler;
        return this;
    }

    /**
     * Sets the transport type by which all the communication with Azure Event Hubs occurs. Default value is
     * {@link TransportType#AMQP}.
     *
     * @param transport The transport type to use.
     * @return The updated {@link EventHubClientBuilder} object.
     */
    public EventHubClientBuilder transportType(TransportType transport) {
        this.transport = transport;
        return this;
    }

    /**
     * Sets the retry policy for {@link EventHubAsyncClient}. If not specified, the default retry options are used.
     *
     * @param retryOptions The retry policy to use.
     * @return The updated {@link EventHubClientBuilder} object.
     */
    public EventHubClientBuilder retry(RetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }

    /**
     * Creates a new {@link EventHubAsyncClient} based on options set on this builder. Every time {@code
     * buildAsyncClient()} is invoked, a new instance of {@link EventHubAsyncClient} is created.
     *
     * <p>
     * The following defaults apply for options not set on this builder:
     * <ul>
     * <li>If no configuration is specified, a clone of the global configuration from {@code
     * ConfigurationManager} is used.</li>
     * <li>If no credentials are set, the connection string is read from the {@code
     * AZURE_EVENT_HUBS_CONNECTION_STRING} configuration value.</li>
     * <li>If no retry is specified, the default retry options are used.</li>
     * <li>If no proxy is specified, the {@code HTTP_PROXY} configuration value is consulted, falling back to
     * system defaults.</li>
     * <li>If no scheduler is specified, {@link Schedulers#elastic()} is used.</li>
     * </ul>
     *
     * @return A new {@link EventHubAsyncClient} instance with all the configured options.
     * @throws IllegalArgumentException if the credentials have not been set and no connection string is
     * available, or if a proxy address is configured while the transport type is not
     * {@link TransportType#AMQP_WEB_SOCKETS}.
     */
    public EventHubAsyncClient buildAsyncClient() {
        configuration = configuration == null ? ConfigurationManager.getConfiguration().clone() : configuration;

        if (credentials == null) {
            // Fall back to the environment-provided connection string when no credential was set.
            final String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING);
            if (ImplUtils.isNullOrEmpty(connectionString)) {
                throw new IllegalArgumentException("Credentials have not been set using 'EventHubClientBuilder.credentials(String)'"
                    + "EventHubClientBuilder.credentials(String, String, TokenCredential). And the connection string is"
                    + "not set in the '" + AZURE_EVENT_HUBS_CONNECTION_STRING + "' environment variable.");
            }
            connectionString(connectionString);
        }

        if (retryOptions == null) {
            retryOptions = DEFAULT_RETRY;
        }

        // Proxies are only supported over web sockets.
        if (proxyConfiguration != null && proxyConfiguration.isProxyAddressConfigured()
            && transport != TransportType.AMQP_WEB_SOCKETS) {
            throw new IllegalArgumentException("Cannot use a proxy when TransportType is not AMQP.");
        }

        if (proxyConfiguration == null) {
            proxyConfiguration = getDefaultProxyConfiguration(configuration);
        }

        if (scheduler == null) {
            scheduler = Schedulers.elastic();
        }

        final ReactorProvider provider = new ReactorProvider();
        final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider);
        // Shared-access-key credentials authorize via SAS; everything else via JWT.
        final CBSAuthorizationType authorizationType = credentials instanceof EventHubSharedAccessKeyCredential
            ? CBSAuthorizationType.SHARED_ACCESS_SIGNATURE : CBSAuthorizationType.JSON_WEB_TOKEN;
        final ConnectionOptions parameters = new ConnectionOptions(host, eventHubName, credentials,
            authorizationType, transport, retryOptions, proxyConfiguration, scheduler);

        return new EventHubAsyncClient(parameters, provider, handlerProvider);
    }

    /*
     * Derives a proxy configuration from the HTTP_PROXY configuration value ("host:port"); returns system
     * defaults when none is configured.
     */
    private ProxyConfiguration getDefaultProxyConfiguration(Configuration configuration) {
        ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE;
        if (proxyConfiguration != null) {
            authentication = proxyConfiguration.authentication();
        }

        String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY);
        if (ImplUtils.isNullOrEmpty(proxyAddress)) {
            return ProxyConfiguration.SYSTEM_DEFAULTS;
        }

        final String[] hostPort = proxyAddress.split(":");
        if (hostPort.length < 2) {
            throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy");
        }

        final String host = hostPort[0];
        final int port = Integer.parseInt(hostPort[1]);
        final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
        final String username = configuration.get(ProxyConfiguration.PROXY_USERNAME);
        final String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD);

        return new ProxyConfiguration(authentication, proxy, username, password);
    }

    /**
     * This property must be set for building an {@link EventProcessorAsyncClient}.
     *
     * Sets the consumer group name from which the {@link EventProcessorAsyncClient} should consume events from.
     *
     * @param consumerGroupName The consumer group name this {@link EventProcessorAsyncClient} should consume events
     * from.
     * @return The updated {@link EventHubClientBuilder} object.
     */
    public EventHubClientBuilder consumerGroupName(String consumerGroupName) {
        this.consumerGroupName = consumerGroupName;
        return this;
    }

    /**
     * This property can be optionally set when building an {@link EventProcessorAsyncClient}.
     *
     * Sets the initial event position. If this property is not set and if checkpoint for a partition doesn't exist,
     * {@link EventPosition#earliest()} is used.
     *
     * @param initialEventPosition The initial event position.
     * @return The updated {@link EventHubClientBuilder} object.
     */
    public EventHubClientBuilder initialEventPosition(EventPosition initialEventPosition) {
        this.initialEventPosition = initialEventPosition;
        return this;
    }

    /**
     * This property must be set when building an {@link EventProcessorAsyncClient}.
     *
     * Sets the {@link PartitionManager} the {@link EventProcessorAsyncClient} will use for storing partition
     * ownership and checkpoint information.
     *
     * @param partitionManager Implementation of {@link PartitionManager}.
     * @return The updated {@link EventHubClientBuilder} object.
     */
    public EventHubClientBuilder partitionManager(PartitionManager partitionManager) {
        this.partitionManager = partitionManager;
        return this;
    }

    /**
     * This property must be set when building an {@link EventProcessorAsyncClient}.
     *
     * Sets the partition processor factory for creating new instance(s) of {@link PartitionProcessor}.
     *
     * @param partitionProcessorFactory The factory that creates new processor for each partition.
     * @return The updated {@link EventHubClientBuilder} object.
     */
    public EventHubClientBuilder partitionProcessorFactory(PartitionProcessorFactory partitionProcessorFactory) {
        this.partitionProcessorFactory = partitionProcessorFactory;
        return this;
    }

    /**
     * This will create a new {@link EventProcessorAsyncClient} configured with the options set in this builder. Each
     * call to this method will return a new instance of {@link EventProcessorAsyncClient}.
     *
     * <p>
     * A new instance of {@link EventHubAsyncClient} is created for it by calling {@link #buildAsyncClient()}.
     * </p>
     *
     * <p>
     * If no initial event position was set on this builder, the processor starts from
     * {@link EventPosition#earliest()} — the earliest available event in the respective partitions.
     * </p>
     *
     * @return A new instance of {@link EventProcessorAsyncClient}.
     */
    public EventProcessorAsyncClient buildEventProcessorAsyncClient() {
        EventPosition initialEventPosition = this.initialEventPosition == null ? EventPosition.earliest()
            : this.initialEventPosition;

        return new EventProcessorAsyncClient(buildAsyncClient(), this.consumerGroupName,
            this.partitionProcessorFactory, initialEventPosition, partitionManager, eventHubName);
    }
}
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private static final RetryOptions DEFAULT_RETRY = new RetryOptions() .tryTimeout(ClientConstants.OPERATION_TIMEOUT); private TokenCredential credentials; private Configuration configuration; private ProxyConfiguration proxyConfiguration; private RetryOptions retryOptions; private Scheduler scheduler; private TransportType transport; private String host; private String eventHubName; private EventPosition initialEventPosition; private PartitionProcessorFactory partitionProcessorFactory; private String consumerGroupName; private PartitionManager partitionManager; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { transport = TransportType.AMQP; } /** * Sets the credential information given a connection string to the Event Hub instance. * * <p> * If the connection string is copied from the Event Hubs namespace, it will likely not contain the name to the * desired Event Hub, which is needed. In this case, the name can be added manually by adding {@literal * "EntityPath=EVENT_HUB_NAME"} to the end of the connection string. For example, "EntityPath=telemetry-hub". * </p> * * <p> * If you have defined a shared access policy directly on the Event Hub itself, then copying the connection string * from that Event Hub will result in a connection string that contains the name. * </p> * * @param connectionString The connection string to use for connecting to the Event Hub instance. It is * expected that the Event Hub name and the shared access key properties are contained in this connection * string. * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code connectionString} is null or empty. Or, the {@code * connectionString} does not contain the "EntityPath" key, which is the name of the Event Hub instance. 
* @throws AzureException If the shared access signature token credential could not be created using the * connection string. */ public EventHubClientBuilder connectionString(String connectionString) { final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); final TokenCredential tokenCredential; try { tokenCredential = new EventHubSharedAccessKeyCredential(properties.sharedAccessKeyName(), properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY); } catch (InvalidKeyException | NoSuchAlgorithmException e) { throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e); } return credential(properties.endpoint().getHost(), properties.eventHubName(), tokenCredential); } /** * Sets the credential information given a connection string to the Event Hubs namespace and name to a specific * Event Hub instance. * * @param connectionString The connection string to use for connecting to the Event Hubs namespace; it is * expected that the shared access key properties are contained in this connection string, but not the Event * Hub name. * @param eventHubName The name of the Event Hub to connect the client to. * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code connectionString} or {@code eventHubName} is null or empty. * Or, if the {@code connectionString} contains the Event Hub name. * @throws AzureException If the shared access signature token credential could not be created using the * connection string. */ /** * Sets the configuration store that is used during construction of the service client. * * If not specified, the default configuration store is used to configure the {@link EventHubAsyncClient}. Use * {@link Configuration * * @param configuration The configuration store used to configure the {@link EventHubAsyncClient}. * @return The updated {@link EventHubClientBuilder} object. 
*/ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it. * * @param host The fully qualified host name for the Event Hubs namespace. This is likely to be similar to * {@literal "{your-namespace}.servicebus.windows.net}". * @param eventHubName The name of the Event Hub to connect the client to. * @param credential The token credential to use for authorization. Access controls may be specified by the * Event Hubs namespace or the requested Event Hub, depending on Azure configuration. * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code host} or {@code eventHubName} is null or empty. * @throws NullPointerException if {@code credentials} is null. */ public EventHubClientBuilder credential(String host, String eventHubName, TokenCredential credential) { if (ImplUtils.isNullOrEmpty(host)) { throw new IllegalArgumentException("'host' cannot be null or empty"); } if (ImplUtils.isNullOrEmpty(eventHubName)) { throw new IllegalArgumentException("'eventHubName' cannot be null or empty."); } Objects.requireNonNull(credential); this.host = host; this.credentials = credential; this.eventHubName = eventHubName; return this; } /** * Sets the proxy configuration to use for {@link EventHubAsyncClient}. When a proxy is configured, {@link * TransportType * * @param proxyConfiguration The proxy configuration to use. * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. 
* * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to * Event Hubs. * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. Default value is {@link * TransportType * * @param transport The transport type to use. * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the retry policy for {@link EventHubAsyncClient}. If not specified, the default retry options are used. * * @param retryOptions The retry policy to use. * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder retry(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Creates a new {@link EventHubAsyncClient} based on options set on this builder. Every time {@code * buildAsyncClient()} is invoked, a new instance of {@link EventHubAsyncClient} is created. * * <p> * The following options are used if ones are not specified in the builder: * * <ul> * <li>If no configuration is specified, the {@link ConfigurationManager * is used to provide any shared configuration values. The configuration values read are the {@link * BaseConfigurations * ProxyConfiguration * <li>If no retry is specified, the default retry options are used.</li> * <li>If no proxy is specified, the builder checks the {@link ConfigurationManager * configuration} for a configured proxy, then it checks to see if a system proxy is configured.</li> * <li>If no timeout is specified, a {@link ClientConstants * <li>If no scheduler is specified, an {@link Schedulers * </ul> * * @return A new {@link EventHubAsyncClient} instance with all the configured options. 
* @throws IllegalArgumentException if the credentials have not been set using either {@link * * specified but the transport type is not {@link TransportType */ public EventHubAsyncClient buildAsyncClient() { configuration = configuration == null ? ConfigurationManager.getConfiguration().clone() : configuration; if (credentials == null) { final String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Credentials have not been set using 'EventHubClientBuilder.credentials(String)'" + "EventHubClientBuilder.credentials(String, String, TokenCredential). And the connection string is" + "not set in the '" + AZURE_EVENT_HUBS_CONNECTION_STRING + "' environment variable."); } connectionString(connectionString); } if (retryOptions == null) { retryOptions = DEFAULT_RETRY; } if (proxyConfiguration != null && proxyConfiguration.isProxyAddressConfigured() && transport != TransportType.AMQP_WEB_SOCKETS) { throw new IllegalArgumentException("Cannot use a proxy when TransportType is not AMQP."); } if (proxyConfiguration == null) { proxyConfiguration = getDefaultProxyConfiguration(configuration); } if (scheduler == null) { scheduler = Schedulers.elastic(); } final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final CBSAuthorizationType authorizationType = credentials instanceof EventHubSharedAccessKeyCredential ? 
CBSAuthorizationType.SHARED_ACCESS_SIGNATURE : CBSAuthorizationType.JSON_WEB_TOKEN; final ConnectionOptions parameters = new ConnectionOptions(host, eventHubName, credentials, authorizationType, transport, retryOptions, proxyConfiguration, scheduler); return new EventHubAsyncClient(parameters, provider, handlerProvider); } private ProxyConfiguration getDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyConfiguration != null) { authentication = proxyConfiguration.authentication(); } String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY); if (ImplUtils.isNullOrEmpty(proxyAddress)) { return ProxyConfiguration.SYSTEM_DEFAULTS; } final String[] hostPort = proxyAddress.split(":"); if (hostPort.length < 2) { throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy"); } final String host = hostPort[0]; final int port = Integer.parseInt(hostPort[1]); final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); final String username = configuration.get(ProxyConfiguration.PROXY_USERNAME); final String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD); return new ProxyConfiguration(authentication, proxy, username, password); } /** * This property must be set for building an {@link EventProcessorAsyncClient}. * * Sets the consumer group name from which the {@link EventProcessorAsyncClient} should consume events from. * * @param consumerGroupName The consumer group name this {@link EventProcessorAsyncClient} should consume events * from. * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder consumerGroupName(String consumerGroupName) { this.consumerGroupName = consumerGroupName; return this; } /** * This property can be optionally set when building an {@link EventProcessorAsyncClient}. * * Sets the initial event position. 
If this property is not set and if checkpoint for a partition doesn't exist, * {@link EventPosition * * @param initialEventPosition The initial event position. * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder initialEventPosition(EventPosition initialEventPosition) { this.initialEventPosition = initialEventPosition; return this; } /** * This property must be set when building an {@link EventProcessorAsyncClient}. * * Sets the {@link PartitionManager} the {@link EventProcessorAsyncClient} will use for storing partition * ownership and checkpoint information. * * @param partitionManager Implementation of {@link PartitionManager}. * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder partitionManager(PartitionManager partitionManager) { this.partitionManager = partitionManager; return this; } /** * This property must be set when building an {@link EventProcessorAsyncClient}. * * Sets the partition processor factory for creating new instance(s) of {@link PartitionProcessor}. * * @param partitionProcessorFactory The factory that creates new processor for each partition. * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder partitionProcessorFactory(PartitionProcessorFactory partitionProcessorFactory) { this.partitionProcessorFactory = partitionProcessorFactory; return this; } /** * This will create a new {@link EventProcessorAsyncClient} configured with the options set in this builder. Each call * to this method will return a new instance of {@link EventProcessorAsyncClient}. * * <p> * A new instance of {@link EventHubAsyncClient} will be created with configured options by calling the {@link * * </p> * * <p> * If the {@link * this {@link EventProcessorAsyncClient} will start processing from {@link EventPosition * available event in the respective partitions. * </p> * * @return A new instance of {@link EventProcessorAsyncClient}. 
*/ public EventProcessorAsyncClient buildEventProcessorAsyncClient() { EventPosition initialEventPosition = this.initialEventPosition == null ? EventPosition.earliest() : this.initialEventPosition; return new EventProcessorAsyncClient(buildAsyncClient(), this.consumerGroupName, this.partitionProcessorFactory, initialEventPosition, partitionManager, eventHubName); } }
Do you know why the context.httpRequest already contained this header? I'm not sure if we're covering up an underlying issue (ie. not resetting the HTTP request context for each HTTP request)
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { String header = context.httpRequest().headers().value("User-Agent"); if (header == null || header.startsWith(DEFAULT_USER_AGENT_HEADER)) { header = userAgent; } else { header = userAgent + " " + header; } context.httpRequest().headers().put("User-Agent", header); return next.process(); }
if (header == null || header.startsWith(DEFAULT_USER_AGENT_HEADER)) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { String header = context.httpRequest().headers().value("User-Agent"); if (header == null || header.startsWith(DEFAULT_USER_AGENT_HEADER)) { header = userAgent; } else { header = userAgent + " " + header; } context.httpRequest().headers().put("User-Agent", header); return next.process(); }
class UserAgentPolicy implements HttpPipelinePolicy { private static final String DEFAULT_USER_AGENT_HEADER = "azsdk-java"; private static final String USER_AGENT_FORMAT = DEFAULT_USER_AGENT_HEADER + "-%s/%s %s"; private static final String DISABLED_TELEMETRY_USER_AGENT_FORMAT = DEFAULT_USER_AGENT_HEADER + "-%s/%s"; private static final String PLATFORM_INFO_FORMAT = "%s; %s %s"; private final String userAgent; /** * Creates a {@link UserAgentPolicy} with a default user agent string. */ public UserAgentPolicy() { this(null); } /** * Creates a UserAgentPolicy with {@code userAgent} as the header value. If {@code userAgent} is {@code null}, then * the default user agent value is used. * * @param userAgent The user agent string to add to request headers. */ public UserAgentPolicy(String userAgent) { if (userAgent != null) { this.userAgent = userAgent; } else { this.userAgent = DEFAULT_USER_AGENT_HEADER; } } /** * Creates a UserAgentPolicy with the {@code sdkName} and {@code sdkVersion} in the User-Agent header value. * * If the passed configuration contains true for AZURE_TELEMETRY_DISABLED the platform information won't be included * in the user agent. * * @param sdkName Name of the client library. * @param sdkVersion Version of the client library. * @param configuration Configuration store that will be checked for the AZURE_TELEMETRY_DISABLED. */ public UserAgentPolicy(String sdkName, String sdkVersion, Configuration configuration) { boolean telemetryDisabled = configuration.get(BaseConfigurations.AZURE_TELEMETRY_DISABLED, false); if (telemetryDisabled) { this.userAgent = String.format(DISABLED_TELEMETRY_USER_AGENT_FORMAT, sdkName, sdkVersion); } else { this.userAgent = String.format(USER_AGENT_FORMAT, sdkName, sdkVersion, getPlatformInfo()); } } /** * Updates the User-Agent header with the value supplied in the policy. 
* * When the User-Agent header already has a value and it differs from the value used to create this policy the * User-Agent header is updated by prepending the value in this policy. * {@inheritDoc} */ @Override private static String getPlatformInfo() { String javaVersion = ConfigurationManager.getConfiguration().get("java.version"); String osName = ConfigurationManager.getConfiguration().get("os.name"); String osVersion = ConfigurationManager.getConfiguration().get("os.version"); return String.format(PLATFORM_INFO_FORMAT, javaVersion, osName, osVersion); } }
class UserAgentPolicy implements HttpPipelinePolicy { private static final String DEFAULT_USER_AGENT_HEADER = "azsdk-java"; private static final String USER_AGENT_FORMAT = DEFAULT_USER_AGENT_HEADER + "-%s/%s %s"; private static final String DISABLED_TELEMETRY_USER_AGENT_FORMAT = DEFAULT_USER_AGENT_HEADER + "-%s/%s"; private static final String PLATFORM_INFO_FORMAT = "%s; %s %s"; private final String userAgent; /** * Creates a {@link UserAgentPolicy} with a default user agent string. */ public UserAgentPolicy() { this(null); } /** * Creates a UserAgentPolicy with {@code userAgent} as the header value. If {@code userAgent} is {@code null}, then * the default user agent value is used. * * @param userAgent The user agent string to add to request headers. */ public UserAgentPolicy(String userAgent) { if (userAgent != null) { this.userAgent = userAgent; } else { this.userAgent = DEFAULT_USER_AGENT_HEADER; } } /** * Creates a UserAgentPolicy with the {@code sdkName} and {@code sdkVersion} in the User-Agent header value. * * If the passed configuration contains true for AZURE_TELEMETRY_DISABLED the platform information won't be included * in the user agent. * * @param sdkName Name of the client library. * @param sdkVersion Version of the client library. * @param configuration Configuration store that will be checked for the AZURE_TELEMETRY_DISABLED. */ public UserAgentPolicy(String sdkName, String sdkVersion, Configuration configuration) { boolean telemetryDisabled = configuration.get(BaseConfigurations.AZURE_TELEMETRY_DISABLED, false); if (telemetryDisabled) { this.userAgent = String.format(DISABLED_TELEMETRY_USER_AGENT_FORMAT, sdkName, sdkVersion); } else { this.userAgent = String.format(USER_AGENT_FORMAT, sdkName, sdkVersion, getPlatformInfo()); } } /** * Updates the User-Agent header with the value supplied in the policy. 
* * When the User-Agent header already has a value and it differs from the value used to create this policy the * User-Agent header is updated by prepending the value in this policy. * {@inheritDoc} */ @Override private static String getPlatformInfo() { String javaVersion = ConfigurationManager.getConfiguration().get("java.version"); String osName = ConfigurationManager.getConfiguration().get("os.name"); String osVersion = ConfigurationManager.getConfiguration().get("os.version"); return String.format(PLATFORM_INFO_FORMAT, javaVersion, osName, osVersion); } }
The header information was being maintained due to the call being captured as a Mono value, every time the Mono was mapped or filtered it would re-run it again and the context was being maintained. Outside of blowing everything away on a successful completion, which just feels risky to do, I feel that making the check for adding the header stricter is safer. I had the thought of removing this if/else statement or potential making a way to indicate if the UserAgent header being added is one generated in our builder vs a customers UserAgent header.
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { String header = context.httpRequest().headers().value("User-Agent"); if (header == null || header.startsWith(DEFAULT_USER_AGENT_HEADER)) { header = userAgent; } else { header = userAgent + " " + header; } context.httpRequest().headers().put("User-Agent", header); return next.process(); }
if (header == null || header.startsWith(DEFAULT_USER_AGENT_HEADER)) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { String header = context.httpRequest().headers().value("User-Agent"); if (header == null || header.startsWith(DEFAULT_USER_AGENT_HEADER)) { header = userAgent; } else { header = userAgent + " " + header; } context.httpRequest().headers().put("User-Agent", header); return next.process(); }
class UserAgentPolicy implements HttpPipelinePolicy { private static final String DEFAULT_USER_AGENT_HEADER = "azsdk-java"; private static final String USER_AGENT_FORMAT = DEFAULT_USER_AGENT_HEADER + "-%s/%s %s"; private static final String DISABLED_TELEMETRY_USER_AGENT_FORMAT = DEFAULT_USER_AGENT_HEADER + "-%s/%s"; private static final String PLATFORM_INFO_FORMAT = "%s; %s %s"; private final String userAgent; /** * Creates a {@link UserAgentPolicy} with a default user agent string. */ public UserAgentPolicy() { this(null); } /** * Creates a UserAgentPolicy with {@code userAgent} as the header value. If {@code userAgent} is {@code null}, then * the default user agent value is used. * * @param userAgent The user agent string to add to request headers. */ public UserAgentPolicy(String userAgent) { if (userAgent != null) { this.userAgent = userAgent; } else { this.userAgent = DEFAULT_USER_AGENT_HEADER; } } /** * Creates a UserAgentPolicy with the {@code sdkName} and {@code sdkVersion} in the User-Agent header value. * * If the passed configuration contains true for AZURE_TELEMETRY_DISABLED the platform information won't be included * in the user agent. * * @param sdkName Name of the client library. * @param sdkVersion Version of the client library. * @param configuration Configuration store that will be checked for the AZURE_TELEMETRY_DISABLED. */ public UserAgentPolicy(String sdkName, String sdkVersion, Configuration configuration) { boolean telemetryDisabled = configuration.get(BaseConfigurations.AZURE_TELEMETRY_DISABLED, false); if (telemetryDisabled) { this.userAgent = String.format(DISABLED_TELEMETRY_USER_AGENT_FORMAT, sdkName, sdkVersion); } else { this.userAgent = String.format(USER_AGENT_FORMAT, sdkName, sdkVersion, getPlatformInfo()); } } /** * Updates the User-Agent header with the value supplied in the policy. 
* * When the User-Agent header already has a value and it differs from the value used to create this policy the * User-Agent header is updated by prepending the value in this policy. * {@inheritDoc} */ @Override private static String getPlatformInfo() { String javaVersion = ConfigurationManager.getConfiguration().get("java.version"); String osName = ConfigurationManager.getConfiguration().get("os.name"); String osVersion = ConfigurationManager.getConfiguration().get("os.version"); return String.format(PLATFORM_INFO_FORMAT, javaVersion, osName, osVersion); } }
class UserAgentPolicy implements HttpPipelinePolicy { private static final String DEFAULT_USER_AGENT_HEADER = "azsdk-java"; private static final String USER_AGENT_FORMAT = DEFAULT_USER_AGENT_HEADER + "-%s/%s %s"; private static final String DISABLED_TELEMETRY_USER_AGENT_FORMAT = DEFAULT_USER_AGENT_HEADER + "-%s/%s"; private static final String PLATFORM_INFO_FORMAT = "%s; %s %s"; private final String userAgent; /** * Creates a {@link UserAgentPolicy} with a default user agent string. */ public UserAgentPolicy() { this(null); } /** * Creates a UserAgentPolicy with {@code userAgent} as the header value. If {@code userAgent} is {@code null}, then * the default user agent value is used. * * @param userAgent The user agent string to add to request headers. */ public UserAgentPolicy(String userAgent) { if (userAgent != null) { this.userAgent = userAgent; } else { this.userAgent = DEFAULT_USER_AGENT_HEADER; } } /** * Creates a UserAgentPolicy with the {@code sdkName} and {@code sdkVersion} in the User-Agent header value. * * If the passed configuration contains true for AZURE_TELEMETRY_DISABLED the platform information won't be included * in the user agent. * * @param sdkName Name of the client library. * @param sdkVersion Version of the client library. * @param configuration Configuration store that will be checked for the AZURE_TELEMETRY_DISABLED. */ public UserAgentPolicy(String sdkName, String sdkVersion, Configuration configuration) { boolean telemetryDisabled = configuration.get(BaseConfigurations.AZURE_TELEMETRY_DISABLED, false); if (telemetryDisabled) { this.userAgent = String.format(DISABLED_TELEMETRY_USER_AGENT_FORMAT, sdkName, sdkVersion); } else { this.userAgent = String.format(USER_AGENT_FORMAT, sdkName, sdkVersion, getPlatformInfo()); } } /** * Updates the User-Agent header with the value supplied in the policy. 
* * When the User-Agent header already has a value and it differs from the value used to create this policy the * User-Agent header is updated by prepending the value in this policy. * {@inheritDoc} */ @Override private static String getPlatformInfo() { String javaVersion = ConfigurationManager.getConfiguration().get("java.version"); String osName = ConfigurationManager.getConfiguration().get("os.name"); String osVersion = ConfigurationManager.getConfiguration().get("os.version"); return String.format(PLATFORM_INFO_FORMAT, javaVersion, osName, osVersion); } }
eventHubPath -> eventHubName
public EventProcessorAsyncClient createInstance() { String connectionString = "Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};" + "SharedAccessKey={sharedAccessKey};EntityPath={eventHubPath}"; EventProcessorAsyncClient eventProcessorAsyncClient = new EventHubClientBuilder() .connectionString(connectionString) .partitionProcessorFactory((PartitionProcessorImpl::new)) .consumerGroupName("consumer-group") .buildEventProcessorAsyncClient(); return eventProcessorAsyncClient; }
+ "SharedAccessKey={sharedAccessKey};EntityPath={eventHubPath}";
public EventProcessorAsyncClient createInstance() { String connectionString = "Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};" + "SharedAccessKey={sharedAccessKey};EntityPath={eventHubName}"; EventProcessorAsyncClient eventProcessorAsyncClient = new EventHubClientBuilder() .connectionString(connectionString) .partitionProcessorFactory((PartitionProcessorImpl::new)) .consumerGroupName("consumer-group") .buildEventProcessorAsyncClient(); return eventProcessorAsyncClient; }
class EventProcessorJavaDocCodeSamples { /** * Code snippet for showing how to create a new instance of {@link EventProcessorAsyncClient}. * * @return An instance of {@link EventProcessorAsyncClient}. */ /** * Code snippet for showing how to start and stop an {@link EventProcessorAsyncClient}. */ public void startStopSample() { EventProcessorAsyncClient eventProcessorAsyncClient = createInstance(); eventProcessorAsyncClient.start(); eventProcessorAsyncClient.stop(); } /** * No-op partition processor used in code snippet to demo creating an instance of {@link EventProcessorAsyncClient}. * This class will not be visible in the code snippet. */ private static class PartitionProcessorImpl implements PartitionProcessor { PartitionContext partitionContext; CheckpointManager checkpointManager; /** * Creates new instance. * * @param partitionContext The partition context for this partition processor. * @param checkpointManager The checkpoint manager for this partition processor. */ private PartitionProcessorImpl(PartitionContext partitionContext, CheckpointManager checkpointManager) { this.partitionContext = partitionContext; this.checkpointManager = checkpointManager; } /** * {@inheritDoc} * * @return a representation of deferred initialization. */ @Override public Mono<Void> initialize() { return Mono.empty(); } /** * {@inheritDoc} * * @return a representation of deferred processing of events. */ @Override public Mono<Void> processEvent(EventData eventData) { return Mono.empty(); } /** * {@inheritDoc} * * @param throwable The {@link Throwable} that caused this method to be called. */ @Override public void processError(Throwable throwable) { System.out.println("Error while processing events"); } /** * {@inheritDoc} * * @param closeReason {@link CloseReason} for closing this partition processor. * @return a representation of deferred closing of partition processor. */ @Override public Mono<Void> close(CloseReason closeReason) { return Mono.empty(); } } }
class EventProcessorJavaDocCodeSamples { /** * Code snippet for showing how to create a new instance of {@link EventProcessorAsyncClient}. * * @return An instance of {@link EventProcessorAsyncClient}. */ /** * Code snippet for showing how to start and stop an {@link EventProcessorAsyncClient}. */ public void startStopSample() { EventProcessorAsyncClient eventProcessorAsyncClient = createInstance(); eventProcessorAsyncClient.start(); eventProcessorAsyncClient.stop(); } /** * No-op partition processor used in code snippet to demo creating an instance of {@link EventProcessorAsyncClient}. * This class will not be visible in the code snippet. */ private static final class PartitionProcessorImpl implements PartitionProcessor { PartitionContext partitionContext; CheckpointManager checkpointManager; /** * Creates new instance. * * @param partitionContext The partition context for this partition processor. * @param checkpointManager The checkpoint manager for this partition processor. */ private PartitionProcessorImpl(PartitionContext partitionContext, CheckpointManager checkpointManager) { this.partitionContext = partitionContext; this.checkpointManager = checkpointManager; } /** * {@inheritDoc} * * @return a representation of deferred initialization. */ @Override public Mono<Void> initialize() { return Mono.empty(); } /** * {@inheritDoc} * * @return a representation of deferred processing of events. */ @Override public Mono<Void> processEvent(EventData eventData) { return Mono.empty(); } /** * {@inheritDoc} * * @param throwable The {@link Throwable} that caused this method to be called. */ @Override public void processError(Throwable throwable) { System.out.println("Error while processing events"); } /** * {@inheritDoc} * * @param closeReason {@link CloseReason} for closing this partition processor. * @return a representation of deferred closing of partition processor. */ @Override public Mono<Void> close(CloseReason closeReason) { return Mono.empty(); } } }
nit: inconsistent `final`. I don't see you re-setting this variable.
private void checkNamingPattern(DetailAST blockCommentToken) { if (!BlockCommentPosition.isOnMethod(blockCommentToken)) { return; } DetailNode javadocNode = null; try { javadocNode = DetailNodeTreeStringPrinter.parseJavadocAsDetailNode(blockCommentToken); } catch (IllegalArgumentException ex) { } if (javadocNode == null) { return; } for (DetailNode node : javadocNode.getChildren()) { if (node.getType() != JavadocTokenTypes.JAVADOC_INLINE_TAG) { continue; } DetailNode customNameNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.CUSTOM_NAME); if (customNameNode == null || !CODE_SNIPPET_ANNOTATION.equals(customNameNode.getText())) { return; } DetailNode descriptionNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.DESCRIPTION); if (descriptionNode == null) { log(node.getLineNumber(), MISSING_CODESNIPPET_TAG_MESSAGE); return; } String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText(); DetailAST methodDefToken = methodDefStack.peek(); final String methodName = methodDefToken.findFirstToken(TokenTypes.IDENT).getText(); final String className = classNameStack.isEmpty() ? "" : classNameStack.peek(); final String parameters = constructParametersString(methodDefToken); String fullPath = packageName + "." + className + "." + methodName; if (parameters != null) { fullPath = fullPath + " } if (customDescription == null || customDescription.isEmpty() || !isNamingMatched(customDescription.toLowerCase(), fullPath.toLowerCase(), parameters)) { log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codeSnippet description " + "''%s'' does not match ''%s''. Case Insensitive.", customDescription, fullPath)); } } }
DetailAST methodDefToken = methodDefStack.peek();
private void checkNamingPattern(DetailAST blockCommentToken) { if (!BlockCommentPosition.isOnMethod(blockCommentToken)) { return; } DetailNode javadocNode = null; try { javadocNode = DetailNodeTreeStringPrinter.parseJavadocAsDetailNode(blockCommentToken); } catch (IllegalArgumentException ex) { } if (javadocNode == null) { return; } for (DetailNode node : javadocNode.getChildren()) { if (node.getType() != JavadocTokenTypes.JAVADOC_INLINE_TAG) { continue; } DetailNode customNameNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.CUSTOM_NAME); if (customNameNode == null || !CODE_SNIPPET_ANNOTATION.equals(customNameNode.getText())) { return; } DetailNode descriptionNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.DESCRIPTION); if (descriptionNode == null) { log(node.getLineNumber(), MISSING_CODESNIPPET_TAG_MESSAGE); return; } String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText(); final String methodName = methodDefToken.findFirstToken(TokenTypes.IDENT).getText(); final String className = classNameStack.isEmpty() ? "" : classNameStack.peek(); final String parameters = constructParametersString(methodDefToken); String fullPath = packageName + "." + className + "." + methodName; final String fullPathWithoutParameters = fullPath; if (parameters != null) { fullPath = fullPath + " } if (customDescription == null || customDescription.isEmpty() || !isNamingMatched(customDescription.toLowerCase(Locale.ROOT), fullPathWithoutParameters.toLowerCase(Locale.ROOT), parameters)) { log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codesnippet description " + "''%s'' does not match ''%s''. Case Insensitive.", customDescription, fullPath)); } } }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private Deque<DetailAST> methodDefStack = new ArrayDeque<>(); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private DetailAST methodDefToken = null; @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
Consider using `AnnotationUtil.getAnnotation(...)`
private boolean hasImmutableAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() == TokenTypes.ANNOTATION) { final DetailAST annotationIdent = ast.findFirstToken(TokenTypes.IDENT); return annotationIdent != null && IMMUTABLE_NOTATION.equals(annotationIdent.getText()); } } return false; }
for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) {
private boolean hasImmutableAnnotation(DetailAST classDefToken) { DetailAST immutableAnnotation = AnnotationUtil.getAnnotation(classDefToken, IMMUTABLE_NOTATION); return immutableAnnotation != null; }
class is annotated with @Immutable, false otherwise. */
class is annotated with {@literal @Immutable}
This could be simplified using `TokenUtil.findFirstTokenByPredicate`. Similar with other instances.
private void checkForOnlyFinalFields(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF == ast.getType()) { final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL) && !Utils.hasIllegalCombination(modifiersToken)) { log(modifiersToken, String.format(ERROR_MSG, ast.findFirstToken(TokenTypes.IDENT).getText(), objBlockToken.getPreviousSibling().getText())); } } } }
for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) {
private void checkForOnlyFinalFields(DetailAST objBlockToken) { Optional<DetailAST> nonFinalFieldFound = TokenUtil.findFirstTokenByPredicate(objBlockToken, node -> TokenTypes.VARIABLE_DEF == node.getType() && !node.branchContains(TokenTypes.FINAL) && !Utils.hasIllegalCombination(node.findFirstToken(TokenTypes.MODIFIERS))); if (nonFinalFieldFound.isPresent()) { DetailAST field = nonFinalFieldFound.get().findFirstToken(TokenTypes.IDENT); log(field, String.format(ERROR_MSG, field.getText())); } }
class are final * * @param objBlockToken the OBJBLOCK AST node */
class are final * * @param objBlockToken the OBJBLOCK AST node */
uff.. I didn't know about those Util classes!! Super util! thank you! Updating...
private boolean hasImmutableAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() == TokenTypes.ANNOTATION) { final DetailAST annotationIdent = ast.findFirstToken(TokenTypes.IDENT); return annotationIdent != null && IMMUTABLE_NOTATION.equals(annotationIdent.getText()); } } return false; }
for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) {
private boolean hasImmutableAnnotation(DetailAST classDefToken) { DetailAST immutableAnnotation = AnnotationUtil.getAnnotation(classDefToken, IMMUTABLE_NOTATION); return immutableAnnotation != null; }
class is annotated with @Immutable, false otherwise. */
class is annotated with {@literal @Immutable}
updated. this TokenUtil would've made everything easier from the beginning. :)
private void checkForOnlyFinalFields(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF == ast.getType()) { final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL) && !Utils.hasIllegalCombination(modifiersToken)) { log(modifiersToken, String.format(ERROR_MSG, ast.findFirstToken(TokenTypes.IDENT).getText(), objBlockToken.getPreviousSibling().getText())); } } } }
for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) {
private void checkForOnlyFinalFields(DetailAST objBlockToken) { Optional<DetailAST> nonFinalFieldFound = TokenUtil.findFirstTokenByPredicate(objBlockToken, node -> TokenTypes.VARIABLE_DEF == node.getType() && !node.branchContains(TokenTypes.FINAL) && !Utils.hasIllegalCombination(node.findFirstToken(TokenTypes.MODIFIERS))); if (nonFinalFieldFound.isPresent()) { DetailAST field = nonFinalFieldFound.get().findFirstToken(TokenTypes.IDENT); log(field, String.format(ERROR_MSG, field.getText())); } }
class are final * * @param objBlockToken the OBJBLOCK AST node */
class are final * * @param objBlockToken the OBJBLOCK AST node */
I think this can be simplified to: ```java node -> { return TokenTypes.VARIABLE_DEF == node.getType() && !node.branchContains(TokenTypes.FINAL) && !Utils.hasIllegalCombination(node.findFirstToken(TokenTypes.MODIFIERS)); } ```
private void checkForOnlyFinalFields(DetailAST objBlockToken) { Optional<DetailAST> nonFinalFieldFound = TokenUtil.findFirstTokenByPredicate(objBlockToken, (node) -> { if (TokenTypes.VARIABLE_DEF == node.getType()) { return !node.branchContains(TokenTypes.FINAL) && !Utils.hasIllegalCombination(node.findFirstToken(TokenTypes.MODIFIERS)); } return false; }); if (nonFinalFieldFound.isPresent()) { DetailAST field = nonFinalFieldFound.get().findFirstToken(TokenTypes.IDENT); log(field, String.format(ERROR_MSG, field.getText())); } }
if (TokenTypes.VARIABLE_DEF == node.getType()) {
private void checkForOnlyFinalFields(DetailAST objBlockToken) { Optional<DetailAST> nonFinalFieldFound = TokenUtil.findFirstTokenByPredicate(objBlockToken, node -> TokenTypes.VARIABLE_DEF == node.getType() && !node.branchContains(TokenTypes.FINAL) && !Utils.hasIllegalCombination(node.findFirstToken(TokenTypes.MODIFIERS))); if (nonFinalFieldFound.isPresent()) { DetailAST field = nonFinalFieldFound.get().findFirstToken(TokenTypes.IDENT); log(field, String.format(ERROR_MSG, field.getText())); } }
class are final * * @param objBlockToken the OBJBLOCK AST node */
class are final * * @param objBlockToken the OBJBLOCK AST node */
For this util method, the javadoc specifies this should be for a variable node, but you never do any checks to ensure that this isn't some random node like CLASS_DEF. Also, why protected?
protected static boolean hasIllegalCombination(DetailAST modifiers) { for (DetailAST modifier = modifiers.getFirstChild(); modifier != null; modifier = modifier.getNextSibling()) { int modifierType = modifier.getType(); if (TokenTypes.ANNOTATION == modifierType) { if (INVALID_FINAL_ANNOTATIONS.contains(modifier.findFirstToken(TokenTypes.IDENT).getText())) { return true; } } if (INVALID_FINAL_COMBINATION.contains(modifierType)) { return true; } } return false; }
for (DetailAST modifier = modifiers.getFirstChild(); modifier != null; modifier = modifier.getNextSibling()) {
protected static boolean hasIllegalCombination(DetailAST modifiers) { if (modifiers.getType() != TokenTypes.MODIFIERS) { return false; } Optional<DetailAST> illegalCombination = TokenUtil.findFirstTokenByPredicate(modifiers, (node) -> { final int type = node.getType(); return INVALID_FINAL_COMBINATION.contains(node.getType()) || (TokenTypes.ANNOTATION == type && INVALID_FINAL_ANNOTATIONS.contains(node.findFirstToken(TokenTypes.IDENT).getText())); }); return illegalCombination.isPresent(); }
class Utils { private static final Set INVALID_FINAL_COMBINATION = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( TokenTypes.LITERAL_TRANSIENT, TokenTypes.LITERAL_VOLATILE ))); private static final Set INVALID_FINAL_ANNOTATIONS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "JsonProperty" ))); /** * Check if variable modifiers contains any of the illegal combination with final modifier * For instance, we don't want to combine transient or volatile with final * * @param modifiers a DetailAST pointing to a Variable list of modifiers * @return true if there is any modifier that shouldn't be combined with final */ }
class Utils { private static final Set INVALID_FINAL_COMBINATION = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( TokenTypes.LITERAL_TRANSIENT, TokenTypes.LITERAL_VOLATILE ))); private static final Set INVALID_FINAL_ANNOTATIONS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "JsonProperty" ))); /** * Check if variable modifiers contains any of the illegal combination with final modifier * For instance, we don't want to combine transient or volatile with final * * @param modifiers a DetailAST pointing to a Variable list of modifiers * @return true if there is any modifier that shouldn't be combined with final */ }
can be replaced with: `TokenUtil.findFirstTokenByPredicate(...)`. I think this could be simplified. ```java node -> { int modifierType = modifier.getType(); return INVALID_FINAL_COMBINATION.contains(modifierType) || (TokenTypes.ANNOTATION == modifierType && INVALID_FINAL_ANNOTATIONS.contains(modifier.findFirstToken(TokenTypes.IDENT).getText())); } ```
protected static boolean hasIllegalCombination(DetailAST modifiers) { for (DetailAST modifier = modifiers.getFirstChild(); modifier != null; modifier = modifier.getNextSibling()) { int modifierType = modifier.getType(); if (TokenTypes.ANNOTATION == modifierType) { if (INVALID_FINAL_ANNOTATIONS.contains(modifier.findFirstToken(TokenTypes.IDENT).getText())) { return true; } } if (INVALID_FINAL_COMBINATION.contains(modifierType)) { return true; } } return false; }
for (DetailAST modifier = modifiers.getFirstChild(); modifier != null; modifier = modifier.getNextSibling()) {
protected static boolean hasIllegalCombination(DetailAST modifiers) { if (modifiers.getType() != TokenTypes.MODIFIERS) { return false; } Optional<DetailAST> illegalCombination = TokenUtil.findFirstTokenByPredicate(modifiers, (node) -> { final int type = node.getType(); return INVALID_FINAL_COMBINATION.contains(node.getType()) || (TokenTypes.ANNOTATION == type && INVALID_FINAL_ANNOTATIONS.contains(node.findFirstToken(TokenTypes.IDENT).getText())); }); return illegalCombination.isPresent(); }
class Utils { private static final Set INVALID_FINAL_COMBINATION = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( TokenTypes.LITERAL_TRANSIENT, TokenTypes.LITERAL_VOLATILE ))); private static final Set INVALID_FINAL_ANNOTATIONS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "JsonProperty" ))); /** * Check if variable modifiers contains any of the illegal combination with final modifier * For instance, we don't want to combine transient or volatile with final * * @param modifiers a DetailAST pointing to a Variable list of modifiers * @return true if there is any modifier that shouldn't be combined with final */ }
class Utils { private static final Set INVALID_FINAL_COMBINATION = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( TokenTypes.LITERAL_TRANSIENT, TokenTypes.LITERAL_VOLATILE ))); private static final Set INVALID_FINAL_ANNOTATIONS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "JsonProperty" ))); /** * Check if variable modifiers contains any of the illegal combination with final modifier * For instance, we don't want to combine transient or volatile with final * * @param modifiers a DetailAST pointing to a Variable list of modifiers * @return true if there is any modifier that shouldn't be combined with final */ }
yes, 👍 . updated
private void checkForOnlyFinalFields(DetailAST objBlockToken) { Optional<DetailAST> nonFinalFieldFound = TokenUtil.findFirstTokenByPredicate(objBlockToken, (node) -> { if (TokenTypes.VARIABLE_DEF == node.getType()) { return !node.branchContains(TokenTypes.FINAL) && !Utils.hasIllegalCombination(node.findFirstToken(TokenTypes.MODIFIERS)); } return false; }); if (nonFinalFieldFound.isPresent()) { DetailAST field = nonFinalFieldFound.get().findFirstToken(TokenTypes.IDENT); log(field, String.format(ERROR_MSG, field.getText())); } }
if (TokenTypes.VARIABLE_DEF == node.getType()) {
private void checkForOnlyFinalFields(DetailAST objBlockToken) { Optional<DetailAST> nonFinalFieldFound = TokenUtil.findFirstTokenByPredicate(objBlockToken, node -> TokenTypes.VARIABLE_DEF == node.getType() && !node.branchContains(TokenTypes.FINAL) && !Utils.hasIllegalCombination(node.findFirstToken(TokenTypes.MODIFIERS))); if (nonFinalFieldFound.isPresent()) { DetailAST field = nonFinalFieldFound.get().findFirstToken(TokenTypes.IDENT); log(field, String.format(ERROR_MSG, field.getText())); } }
class are final * * @param objBlockToken the OBJBLOCK AST node */
class are final * * @param objBlockToken the OBJBLOCK AST node */
@conniey , thanks for the comments! - protected to have it only available for same package `com.azure.tools.checkstyle.checks` No need to use it outside from there. All custom checks are added to this package. - I like your proposal with `findFirstTokenByPredicate()` . I will update it.
protected static boolean hasIllegalCombination(DetailAST modifiers) { for (DetailAST modifier = modifiers.getFirstChild(); modifier != null; modifier = modifier.getNextSibling()) { int modifierType = modifier.getType(); if (TokenTypes.ANNOTATION == modifierType) { if (INVALID_FINAL_ANNOTATIONS.contains(modifier.findFirstToken(TokenTypes.IDENT).getText())) { return true; } } if (INVALID_FINAL_COMBINATION.contains(modifierType)) { return true; } } return false; }
for (DetailAST modifier = modifiers.getFirstChild(); modifier != null; modifier = modifier.getNextSibling()) {
protected static boolean hasIllegalCombination(DetailAST modifiers) { if (modifiers.getType() != TokenTypes.MODIFIERS) { return false; } Optional<DetailAST> illegalCombination = TokenUtil.findFirstTokenByPredicate(modifiers, (node) -> { final int type = node.getType(); return INVALID_FINAL_COMBINATION.contains(node.getType()) || (TokenTypes.ANNOTATION == type && INVALID_FINAL_ANNOTATIONS.contains(node.findFirstToken(TokenTypes.IDENT).getText())); }); return illegalCombination.isPresent(); }
class Utils { private static final Set INVALID_FINAL_COMBINATION = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( TokenTypes.LITERAL_TRANSIENT, TokenTypes.LITERAL_VOLATILE ))); private static final Set INVALID_FINAL_ANNOTATIONS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "JsonProperty" ))); /** * Check if variable modifiers contains any of the illegal combination with final modifier * For instance, we don't want to combine transient or volatile with final * * @param modifiers a DetailAST pointing to a Variable list of modifiers * @return true if there is any modifier that shouldn't be combined with final */ }
class Utils { private static final Set INVALID_FINAL_COMBINATION = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( TokenTypes.LITERAL_TRANSIENT, TokenTypes.LITERAL_VOLATILE ))); private static final Set INVALID_FINAL_ANNOTATIONS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "JsonProperty" ))); /** * Check if variable modifiers contains any of the illegal combination with final modifier * For instance, we don't want to combine transient or volatile with final * * @param modifiers a DetailAST pointing to a Variable list of modifiers * @return true if there is any modifier that shouldn't be combined with final */ }
`ImplUtils.isNullOrEmpty`?
public AsyncDocumentClient build() { ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot build client without service endpoint"); ifThrowIllegalArgException( this.masterKeyOrResourceToken == null && (permissionFeed == null || permissionFeed.isEmpty()) && this.tokenResolver == null && this.cosmosKeyCredential == null, "cannot build client without any one of masterKey, " + "resource token, permissionFeed, tokenResolver and cosmos key credential"); ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.key()), "cannot build client without key credential"); RxDocumentClientImpl client = new RxDocumentClientImpl(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, desiredConsistencyLevel, configs, tokenResolver, cosmosKeyCredential); client.init(); return client; }
ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.key()),
public AsyncDocumentClient build() { ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot build client without service endpoint"); ifThrowIllegalArgException( this.masterKeyOrResourceToken == null && (permissionFeed == null || permissionFeed.isEmpty()) && this.tokenResolver == null && this.cosmosKeyCredential == null, "cannot build client without any one of masterKey, " + "resource token, permissionFeed, tokenResolver and cosmos key credential"); ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.key()), "cannot build client without key credential"); RxDocumentClientImpl client = new RxDocumentClientImpl(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, desiredConsistencyLevel, configs, tokenResolver, cosmosKeyCredential); client.init(); return client; }
class Builder { Configs configs = new Configs(); ConnectionPolicy connectionPolicy; ConsistencyLevel desiredConsistencyLevel; List<Permission> permissionFeed; String masterKeyOrResourceToken; URI serviceEndpoint; TokenResolver tokenResolver; CosmosKeyCredential cosmosKeyCredential; public Builder withServiceEndpoint(String serviceEndpoint) { try { this.serviceEndpoint = new URI(serviceEndpoint); } catch (URISyntaxException e) { throw new IllegalArgumentException(e.getMessage()); } return this; } /** * New method withMasterKeyOrResourceToken will take either master key or resource token * and perform authentication for accessing resource. * * @param masterKeyOrResourceToken MasterKey or resourceToken for authentication. * @return current Builder. * @deprecated use {@link */ @Deprecated public Builder withMasterKey(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; return this; } /** * This method will accept the master key , additionally it can also consume * resource token too for authentication. * * @param masterKeyOrResourceToken MasterKey or resourceToken for authentication. * @return current Builder. */ public Builder withMasterKeyOrResourceToken(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; return this; } /** * This method will accept the permission list , which contains the * resource tokens needed to access resources. * * @param permissionFeed Permission list for authentication. * @return current Builder. 
*/ public Builder withPermissionFeed(List<Permission> permissionFeed) { this.permissionFeed = permissionFeed; return this; } public Builder withConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; return this; } public Builder withConfigs(Configs configs) { this.configs = configs; return this; } public Builder withConnectionPolicy(ConnectionPolicy connectionPolicy) { this.connectionPolicy = connectionPolicy; return this; } public Builder withCosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) { if (cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.key())) { throw new IllegalArgumentException("Cannot build client with empty key credential"); } this.cosmosKeyCredential = cosmosKeyCredential; return this; } /** * This method will accept functional interface TokenResolver which helps in generation authorization * token per request. AsyncDocumentClient can be successfully initialized with this API without passing any MasterKey, ResourceToken or PermissionFeed. * @param tokenResolver The tokenResolver * @return current Builder. 
*/ public Builder withTokenResolver(TokenResolver tokenResolver) { this.tokenResolver = tokenResolver; return this; } private void ifThrowIllegalArgException(boolean value, String error) { if (value) { throw new IllegalArgumentException(error); } } public Configs getConfigs() { return configs; } public void setConfigs(Configs configs) { this.configs = configs; } public ConnectionPolicy getConnectionPolicy() { return connectionPolicy; } public void setConnectionPolicy(ConnectionPolicy connectionPolicy) { this.connectionPolicy = connectionPolicy; } public ConsistencyLevel getDesiredConsistencyLevel() { return desiredConsistencyLevel; } public void setDesiredConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; } public List<Permission> getPermissionFeed() { return permissionFeed; } public void setPermissionFeed(List<Permission> permissionFeed) { this.permissionFeed = permissionFeed; } public String getMasterKeyOrResourceToken() { return masterKeyOrResourceToken; } public void setMasterKeyOrResourceToken(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; } public URI getServiceEndpoint() { return serviceEndpoint; } public void setServiceEndpoint(URI serviceEndpoint) { this.serviceEndpoint = serviceEndpoint; } public TokenResolver getTokenResolver() { return tokenResolver; } public void setTokenResolver(TokenResolver tokenResolver) { this.tokenResolver = tokenResolver; } public CosmosKeyCredential getCosmosKeyCredential() { return cosmosKeyCredential; } }
class Builder { Configs configs = new Configs(); ConnectionPolicy connectionPolicy; ConsistencyLevel desiredConsistencyLevel; List<Permission> permissionFeed; String masterKeyOrResourceToken; URI serviceEndpoint; TokenResolver tokenResolver; CosmosKeyCredential cosmosKeyCredential; public Builder withServiceEndpoint(String serviceEndpoint) { try { this.serviceEndpoint = new URI(serviceEndpoint); } catch (URISyntaxException e) { throw new IllegalArgumentException(e.getMessage()); } return this; } /** * New method withMasterKeyOrResourceToken will take either master key or resource token * and perform authentication for accessing resource. * * @param masterKeyOrResourceToken MasterKey or resourceToken for authentication. * @return current Builder. * @deprecated use {@link */ @Deprecated public Builder withMasterKey(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; return this; } /** * This method will accept the master key , additionally it can also consume * resource token too for authentication. * * @param masterKeyOrResourceToken MasterKey or resourceToken for authentication. * @return current Builder. */ public Builder withMasterKeyOrResourceToken(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; return this; } /** * This method will accept the permission list , which contains the * resource tokens needed to access resources. * * @param permissionFeed Permission list for authentication. * @return current Builder. 
*/ public Builder withPermissionFeed(List<Permission> permissionFeed) { this.permissionFeed = permissionFeed; return this; } public Builder withConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; return this; } public Builder withConfigs(Configs configs) { this.configs = configs; return this; } public Builder withConnectionPolicy(ConnectionPolicy connectionPolicy) { this.connectionPolicy = connectionPolicy; return this; } public Builder withCosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) { if (cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.key())) { throw new IllegalArgumentException("Cannot build client with empty key credential"); } this.cosmosKeyCredential = cosmosKeyCredential; return this; } /** * This method will accept functional interface TokenResolver which helps in generation authorization * token per request. AsyncDocumentClient can be successfully initialized with this API without passing any MasterKey, ResourceToken or PermissionFeed. * @param tokenResolver The tokenResolver * @return current Builder. 
*/ public Builder withTokenResolver(TokenResolver tokenResolver) { this.tokenResolver = tokenResolver; return this; } private void ifThrowIllegalArgException(boolean value, String error) { if (value) { throw new IllegalArgumentException(error); } } public Configs getConfigs() { return configs; } public void setConfigs(Configs configs) { this.configs = configs; } public ConnectionPolicy getConnectionPolicy() { return connectionPolicy; } public void setConnectionPolicy(ConnectionPolicy connectionPolicy) { this.connectionPolicy = connectionPolicy; } public ConsistencyLevel getDesiredConsistencyLevel() { return desiredConsistencyLevel; } public void setDesiredConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; } public List<Permission> getPermissionFeed() { return permissionFeed; } public void setPermissionFeed(List<Permission> permissionFeed) { this.permissionFeed = permissionFeed; } public String getMasterKeyOrResourceToken() { return masterKeyOrResourceToken; } public void setMasterKeyOrResourceToken(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; } public URI getServiceEndpoint() { return serviceEndpoint; } public void setServiceEndpoint(URI serviceEndpoint) { this.serviceEndpoint = serviceEndpoint; } public TokenResolver getTokenResolver() { return tokenResolver; } public void setTokenResolver(TokenResolver tokenResolver) { this.tokenResolver = tokenResolver; } public CosmosKeyCredential getCosmosKeyCredential() { return cosmosKeyCredential; } }
Just a style suggestion: try to make the boolean expression more readable when writing long boolean expressions, e.g.: ```java return INVALID_FINAL_COMBINATION.contains(node.getType()) || (TokenTypes.ANNOTATION == type && INVALID_FINAL_ANNOTATIONS.contains(node.findFirstToken(TokenTypes.IDENT).getText())); ```
protected static boolean hasIllegalCombination(DetailAST modifiers) { if (modifiers.getType() != TokenTypes.MODIFIERS) { return false; } Optional<DetailAST> illegalCombination = TokenUtil.findFirstTokenByPredicate(modifiers, (node) -> { final int type = node.getType(); return INVALID_FINAL_COMBINATION.contains(node.getType()) || (TokenTypes.ANNOTATION == type && INVALID_FINAL_ANNOTATIONS.contains(node.findFirstToken(TokenTypes.IDENT).getText())); }); return illegalCombination.isPresent(); }
&& INVALID_FINAL_ANNOTATIONS.contains(node.findFirstToken(TokenTypes.IDENT).getText()));
protected static boolean hasIllegalCombination(DetailAST modifiers) { if (modifiers.getType() != TokenTypes.MODIFIERS) { return false; } Optional<DetailAST> illegalCombination = TokenUtil.findFirstTokenByPredicate(modifiers, (node) -> { final int type = node.getType(); return INVALID_FINAL_COMBINATION.contains(node.getType()) || (TokenTypes.ANNOTATION == type && INVALID_FINAL_ANNOTATIONS.contains(node.findFirstToken(TokenTypes.IDENT).getText())); }); return illegalCombination.isPresent(); }
class Utils { private static final Set INVALID_FINAL_COMBINATION = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( TokenTypes.LITERAL_TRANSIENT, TokenTypes.LITERAL_VOLATILE ))); private static final Set INVALID_FINAL_ANNOTATIONS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "JsonProperty" ))); /** * Check if variable modifiers contains any of the illegal combination with final modifier * For instance, we don't want to combine transient or volatile with final * * @param modifiers a DetailAST pointing to a Variable list of modifiers * @return true if there is any modifier that shouldn't be combined with final */ }
class Utils { private static final Set INVALID_FINAL_COMBINATION = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( TokenTypes.LITERAL_TRANSIENT, TokenTypes.LITERAL_VOLATILE ))); private static final Set INVALID_FINAL_ANNOTATIONS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "JsonProperty" ))); /** * Check if variable modifiers contains any of the illegal combination with final modifier * For instance, we don't want to combine transient or volatile with final * * @param modifiers a DetailAST pointing to a Variable list of modifiers * @return true if there is any modifier that shouldn't be combined with final */ }
nit: this logic is duplicated from CosmosClient - should we only have it in one place?
public AsyncDocumentClient build() { ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot build client without service endpoint"); ifThrowIllegalArgException( this.masterKeyOrResourceToken == null && (permissionFeed == null || permissionFeed.isEmpty()) && this.tokenResolver == null && this.cosmosKeyCredential == null, "cannot build client without any one of masterKey, " + "resource token, permissionFeed, tokenResolver and cosmos key credential"); ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getMasterKey()), "cannot build client without key credential"); RxDocumentClientImpl client = new RxDocumentClientImpl(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, desiredConsistencyLevel, configs, tokenResolver, cosmosKeyCredential); client.init(); return client; }
this.masterKeyOrResourceToken == null && (permissionFeed == null || permissionFeed.isEmpty())
public AsyncDocumentClient build() { ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot build client without service endpoint"); ifThrowIllegalArgException( this.masterKeyOrResourceToken == null && (permissionFeed == null || permissionFeed.isEmpty()) && this.tokenResolver == null && this.cosmosKeyCredential == null, "cannot build client without any one of masterKey, " + "resource token, permissionFeed, tokenResolver and cosmos key credential"); ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.key()), "cannot build client without key credential"); RxDocumentClientImpl client = new RxDocumentClientImpl(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, desiredConsistencyLevel, configs, tokenResolver, cosmosKeyCredential); client.init(); return client; }
class Builder { Configs configs = new Configs(); ConnectionPolicy connectionPolicy; ConsistencyLevel desiredConsistencyLevel; List<Permission> permissionFeed; String masterKeyOrResourceToken; URI serviceEndpoint; TokenResolver tokenResolver; CosmosKeyCredential cosmosKeyCredential; public Builder withServiceEndpoint(String serviceEndpoint) { try { this.serviceEndpoint = new URI(serviceEndpoint); } catch (URISyntaxException e) { throw new IllegalArgumentException(e.getMessage()); } return this; } /** * New method withMasterKeyOrResourceToken will take either master key or resource token * and perform authentication for accessing resource. * * @param masterKeyOrResourceToken MasterKey or resourceToken for authentication. * @return current Builder. * @deprecated use {@link */ @Deprecated public Builder withMasterKey(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; return this; } /** * This method will accept the master key , additionally it can also consume * resource token too for authentication. * * @param masterKeyOrResourceToken MasterKey or resourceToken for authentication. * @return current Builder. */ public Builder withMasterKeyOrResourceToken(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; return this; } /** * This method will accept the permission list , which contains the * resource tokens needed to access resources. * * @param permissionFeed Permission list for authentication. * @return current Builder. 
*/ public Builder withPermissionFeed(List<Permission> permissionFeed) { this.permissionFeed = permissionFeed; return this; } public Builder withConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; return this; } public Builder withConfigs(Configs configs) { this.configs = configs; return this; } public Builder withConnectionPolicy(ConnectionPolicy connectionPolicy) { this.connectionPolicy = connectionPolicy; return this; } public Builder withCosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) { if (cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getMasterKey())) { throw new IllegalArgumentException("Cannot build client with empty key credential"); } this.cosmosKeyCredential = cosmosKeyCredential; return this; } /** * This method will accept functional interface TokenResolver which helps in generation authorization * token per request. AsyncDocumentClient can be successfully initialized with this API without passing any MasterKey, ResourceToken or PermissionFeed. * @param tokenResolver The tokenResolver * @return current Builder. 
*/ public Builder withTokenResolver(TokenResolver tokenResolver) { this.tokenResolver = tokenResolver; return this; } private void ifThrowIllegalArgException(boolean value, String error) { if (value) { throw new IllegalArgumentException(error); } } public Configs getConfigs() { return configs; } public void setConfigs(Configs configs) { this.configs = configs; } public ConnectionPolicy getConnectionPolicy() { return connectionPolicy; } public void setConnectionPolicy(ConnectionPolicy connectionPolicy) { this.connectionPolicy = connectionPolicy; } public ConsistencyLevel getDesiredConsistencyLevel() { return desiredConsistencyLevel; } public void setDesiredConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; } public List<Permission> getPermissionFeed() { return permissionFeed; } public void setPermissionFeed(List<Permission> permissionFeed) { this.permissionFeed = permissionFeed; } public String getMasterKeyOrResourceToken() { return masterKeyOrResourceToken; } public void setMasterKeyOrResourceToken(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; } public URI getServiceEndpoint() { return serviceEndpoint; } public void setServiceEndpoint(URI serviceEndpoint) { this.serviceEndpoint = serviceEndpoint; } public TokenResolver getTokenResolver() { return tokenResolver; } public void setTokenResolver(TokenResolver tokenResolver) { this.tokenResolver = tokenResolver; } public CosmosKeyCredential getCosmosKeyCredential() { return cosmosKeyCredential; } public void setCosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) { this.cosmosKeyCredential = cosmosKeyCredential; } }
class Builder { Configs configs = new Configs(); ConnectionPolicy connectionPolicy; ConsistencyLevel desiredConsistencyLevel; List<Permission> permissionFeed; String masterKeyOrResourceToken; URI serviceEndpoint; TokenResolver tokenResolver; CosmosKeyCredential cosmosKeyCredential; public Builder withServiceEndpoint(String serviceEndpoint) { try { this.serviceEndpoint = new URI(serviceEndpoint); } catch (URISyntaxException e) { throw new IllegalArgumentException(e.getMessage()); } return this; } /** * New method withMasterKeyOrResourceToken will take either master key or resource token * and perform authentication for accessing resource. * * @param masterKeyOrResourceToken MasterKey or resourceToken for authentication. * @return current Builder. * @deprecated use {@link */ @Deprecated public Builder withMasterKey(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; return this; } /** * This method will accept the master key , additionally it can also consume * resource token too for authentication. * * @param masterKeyOrResourceToken MasterKey or resourceToken for authentication. * @return current Builder. */ public Builder withMasterKeyOrResourceToken(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; return this; } /** * This method will accept the permission list , which contains the * resource tokens needed to access resources. * * @param permissionFeed Permission list for authentication. * @return current Builder. 
*/ public Builder withPermissionFeed(List<Permission> permissionFeed) { this.permissionFeed = permissionFeed; return this; } public Builder withConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; return this; } public Builder withConfigs(Configs configs) { this.configs = configs; return this; } public Builder withConnectionPolicy(ConnectionPolicy connectionPolicy) { this.connectionPolicy = connectionPolicy; return this; } public Builder withCosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) { if (cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.key())) { throw new IllegalArgumentException("Cannot build client with empty key credential"); } this.cosmosKeyCredential = cosmosKeyCredential; return this; } /** * This method will accept functional interface TokenResolver which helps in generation authorization * token per request. AsyncDocumentClient can be successfully initialized with this API without passing any MasterKey, ResourceToken or PermissionFeed. * @param tokenResolver The tokenResolver * @return current Builder. 
*/ public Builder withTokenResolver(TokenResolver tokenResolver) { this.tokenResolver = tokenResolver; return this; } private void ifThrowIllegalArgException(boolean value, String error) { if (value) { throw new IllegalArgumentException(error); } } public Configs getConfigs() { return configs; } public void setConfigs(Configs configs) { this.configs = configs; } public ConnectionPolicy getConnectionPolicy() { return connectionPolicy; } public void setConnectionPolicy(ConnectionPolicy connectionPolicy) { this.connectionPolicy = connectionPolicy; } public ConsistencyLevel getDesiredConsistencyLevel() { return desiredConsistencyLevel; } public void setDesiredConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; } public List<Permission> getPermissionFeed() { return permissionFeed; } public void setPermissionFeed(List<Permission> permissionFeed) { this.permissionFeed = permissionFeed; } public String getMasterKeyOrResourceToken() { return masterKeyOrResourceToken; } public void setMasterKeyOrResourceToken(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; } public URI getServiceEndpoint() { return serviceEndpoint; } public void setServiceEndpoint(URI serviceEndpoint) { this.serviceEndpoint = serviceEndpoint; } public TokenResolver getTokenResolver() { return tokenResolver; } public void setTokenResolver(TokenResolver tokenResolver) { this.tokenResolver = tokenResolver; } public CosmosKeyCredential getCosmosKeyCredential() { return cosmosKeyCredential; } }
There are multiple places you use `toLowerCase()`. If the same method was invoked on a JVM whose default locale is not en-US, this may not be what you want. In general, I'd suggest `toLowerCase(Locale.ROOT)` or `toLowerCase(Locale.US)`.
private void checkNamingPattern(DetailAST blockCommentToken) { if (!BlockCommentPosition.isOnMethod(blockCommentToken)) { return; } DetailNode javadocNode = null; try { javadocNode = DetailNodeTreeStringPrinter.parseJavadocAsDetailNode(blockCommentToken); } catch (IllegalArgumentException ex) { } if (javadocNode == null) { return; } for (DetailNode node : javadocNode.getChildren()) { if (node.getType() != JavadocTokenTypes.JAVADOC_INLINE_TAG) { continue; } DetailNode customNameNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.CUSTOM_NAME); if (customNameNode == null || !CODE_SNIPPET_ANNOTATION.equals(customNameNode.getText())) { return; } DetailNode descriptionNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.DESCRIPTION); if (descriptionNode == null) { log(node.getLineNumber(), MISSING_CODESNIPPET_TAG_MESSAGE); return; } String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText(); DetailAST methodDefToken = methodDefStack.peek(); final String methodName = methodDefToken.findFirstToken(TokenTypes.IDENT).getText(); final String className = classNameStack.isEmpty() ? "" : classNameStack.peek(); final String parameters = constructParametersString(methodDefToken); String fullPath = packageName + "." + className + "." + methodName; if (parameters != null) { fullPath = fullPath + " } if (customDescription == null || customDescription.isEmpty() || !isNamingMatched(customDescription.toLowerCase(), fullPath.toLowerCase(), parameters)) { log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codeSnippet description " + "''%s'' does not match ''%s''. Case Insensitive.", customDescription, fullPath)); } } }
!isNamingMatched(customDescription.toLowerCase(), fullPath.toLowerCase(), parameters)) {
private void checkNamingPattern(DetailAST blockCommentToken) { if (!BlockCommentPosition.isOnMethod(blockCommentToken)) { return; } DetailNode javadocNode = null; try { javadocNode = DetailNodeTreeStringPrinter.parseJavadocAsDetailNode(blockCommentToken); } catch (IllegalArgumentException ex) { } if (javadocNode == null) { return; } for (DetailNode node : javadocNode.getChildren()) { if (node.getType() != JavadocTokenTypes.JAVADOC_INLINE_TAG) { continue; } DetailNode customNameNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.CUSTOM_NAME); if (customNameNode == null || !CODE_SNIPPET_ANNOTATION.equals(customNameNode.getText())) { return; } DetailNode descriptionNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.DESCRIPTION); if (descriptionNode == null) { log(node.getLineNumber(), MISSING_CODESNIPPET_TAG_MESSAGE); return; } String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText(); final String methodName = methodDefToken.findFirstToken(TokenTypes.IDENT).getText(); final String className = classNameStack.isEmpty() ? "" : classNameStack.peek(); final String parameters = constructParametersString(methodDefToken); String fullPath = packageName + "." + className + "." + methodName; final String fullPathWithoutParameters = fullPath; if (parameters != null) { fullPath = fullPath + " } if (customDescription == null || customDescription.isEmpty() || !isNamingMatched(customDescription.toLowerCase(Locale.ROOT), fullPathWithoutParameters.toLowerCase(Locale.ROOT), parameters)) { log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codesnippet description " + "''%s'' does not match ''%s''. Case Insensitive.", customDescription, fullPath)); } } }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private Deque<DetailAST> methodDefStack = new ArrayDeque<>(); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private DetailAST methodDefToken = null; @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
you are evaluating masterkey#hashCode on every single call. And it seems your intention is to only use it for comparing the new hashcode with the old hashcode. Direct string comparison should be faster than hashcode computation. For hashcode computation you are iterating over all chars of the string and doing some arithmetic, but in string comparison you are just comparing the chars.
private Mac getMacInstance() { int masterKeyLatestHashCode = this.cosmosKeyCredential.getMasterKey().hashCode(); if (masterKeyLatestHashCode != this.masterKeyHashCode) { byte[] masterKeyBytes = this.cosmosKeyCredential.getMasterKey().getBytes(); byte[] masterKeyDecodedBytes = Utils.Base64Decoder.decode(masterKeyBytes); SecretKey signingKey = new SecretKeySpec(masterKeyDecodedBytes, "HMACSHA256"); try { Mac macInstance = Mac.getInstance("HMACSHA256"); macInstance.init(signingKey); this.masterKeyHashCode = masterKeyLatestHashCode; return macInstance; } catch (NoSuchAlgorithmException | InvalidKeyException e) { throw new IllegalStateException(e); } } else { try { return (Mac)this.macInstance.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException(e); } } }
int masterKeyLatestHashCode = this.cosmosKeyCredential.getMasterKey().hashCode();
private Mac getMacInstance() { int masterKeyLatestHashCode = this.cosmosKeyCredential.keyHashCode(); if (masterKeyLatestHashCode != this.masterKeyHashCode) { byte[] masterKeyBytes = this.cosmosKeyCredential.key().getBytes(); byte[] masterKeyDecodedBytes = Utils.Base64Decoder.decode(masterKeyBytes); SecretKey signingKey = new SecretKeySpec(masterKeyDecodedBytes, "HMACSHA256"); try { Mac macInstance = Mac.getInstance("HMACSHA256"); macInstance.init(signingKey); this.masterKeyHashCode = masterKeyLatestHashCode; return macInstance; } catch (NoSuchAlgorithmException | InvalidKeyException e) { throw new IllegalStateException(e); } } else { try { return (Mac)this.macInstance.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException(e); } } }
class BaseAuthorizationTokenProvider implements AuthorizationTokenProvider { private static final String AUTH_PREFIX = "type=master&ver=1.0&sig="; private final CosmosKeyCredential cosmosKeyCredential; private Mac macInstance; private int masterKeyHashCode; public BaseAuthorizationTokenProvider(CosmosKeyCredential cosmosKeyCredential) { this.cosmosKeyCredential = cosmosKeyCredential; this.macInstance = getMacInstance(); } private static String getResourceSegment(ResourceType resourceType) { switch (resourceType) { case Attachment: return Paths.ATTACHMENTS_PATH_SEGMENT; case Database: return Paths.DATABASES_PATH_SEGMENT; case Conflict: return Paths.CONFLICTS_PATH_SEGMENT; case Document: return Paths.DOCUMENTS_PATH_SEGMENT; case DocumentCollection: return Paths.COLLECTIONS_PATH_SEGMENT; case Offer: return Paths.OFFERS_PATH_SEGMENT; case Permission: return Paths.PERMISSIONS_PATH_SEGMENT; case StoredProcedure: return Paths.STORED_PROCEDURES_PATH_SEGMENT; case Trigger: return Paths.TRIGGERS_PATH_SEGMENT; case UserDefinedFunction: return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; case User: return Paths.USERS_PATH_SEGMENT; case PartitionKeyRange: return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; case Media: return Paths.MEDIA_PATH_SEGMENT; case DatabaseAccount: return ""; default: return null; } } /** * This API is a helper method to create auth header based on client request using masterkey. * * @param verb the verb. * @param resourceIdOrFullName the resource id or full name * @param resourceType the resource type. * @param headers the request headers. * @return the key authorization signature. 
*/ public String generateKeyAuthorizationSignature(String verb, String resourceIdOrFullName, ResourceType resourceType, Map<String, String> headers) { return this.generateKeyAuthorizationSignature(verb, resourceIdOrFullName, BaseAuthorizationTokenProvider.getResourceSegment(resourceType).toLowerCase(), headers); } /** * This API is a helper method to create auth header based on client request using masterkey. * * @param verb the verb * @param resourceIdOrFullName the resource id or full name * @param resourceSegment the resource segment * @param headers the request headers * @return the key authorization signature */ public String generateKeyAuthorizationSignature(String verb, String resourceIdOrFullName, String resourceSegment, Map<String, String> headers) { if (verb == null || verb.isEmpty()) { throw new IllegalArgumentException("verb"); } if (resourceIdOrFullName == null) { resourceIdOrFullName = ""; } if (resourceSegment == null) { throw new IllegalArgumentException("resourceSegment"); } if (headers == null) { throw new IllegalArgumentException("headers"); } if (StringUtils.isEmpty(this.cosmosKeyCredential.getMasterKey())) { throw new IllegalArgumentException("key credentials cannot be empty"); } if(!PathsHelper.isNameBased(resourceIdOrFullName)) { resourceIdOrFullName = resourceIdOrFullName.toLowerCase(Locale.ROOT); } StringBuilder body = new StringBuilder(); body.append(verb.toLowerCase()) .append('\n') .append(resourceSegment) .append('\n') .append(resourceIdOrFullName) .append('\n'); if (headers.containsKey(HttpConstants.HttpHeaders.X_DATE)) { body.append(headers.get(HttpConstants.HttpHeaders.X_DATE).toLowerCase()); } body.append('\n'); if (headers.containsKey(HttpConstants.HttpHeaders.HTTP_DATE)) { body.append(headers.get(HttpConstants.HttpHeaders.HTTP_DATE).toLowerCase()); } body.append('\n'); Mac mac = getMacInstance(); byte[] digest = mac.doFinal(body.toString().getBytes()); String auth = Utils.encodeBase64String(digest); return AUTH_PREFIX + auth; } 
/** * This API is a helper method to create auth header based on client request using resourceTokens. * * @param resourceTokens the resource tokens. * @param path the path. * @param resourceId the resource id. * @return the authorization token. */ public String getAuthorizationTokenUsingResourceTokens(Map<String, String> resourceTokens, String path, String resourceId) { if (resourceTokens == null) { throw new IllegalArgumentException("resourceTokens"); } String resourceToken = null; if (resourceTokens.containsKey(resourceId) && resourceTokens.get(resourceId) != null) { resourceToken = resourceTokens.get(resourceId); } else if (StringUtils.isEmpty(path) || StringUtils.isEmpty(resourceId)) { if (resourceTokens.size() > 0) { resourceToken = resourceTokens.values().iterator().next(); } } else { String[] pathParts = StringUtils.split(path, "/"); String[] resourceTypes = {"dbs", "colls", "docs", "sprocs", "udfs", "triggers", "users", "permissions", "attachments", "media", "conflicts"}; HashSet<String> resourceTypesSet = new HashSet<String>(); Collections.addAll(resourceTypesSet, resourceTypes); for (int i = pathParts.length - 1; i >= 0; --i) { if (!resourceTypesSet.contains(pathParts[i]) && resourceTokens.containsKey(pathParts[i])) { resourceToken = resourceTokens.get(pathParts[i]); } } } return resourceToken; } public String generateKeyAuthorizationSignature(String verb, URI uri, Map<String, String> headers) { if (StringUtils.isEmpty(verb)) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "verb")); } if (uri == null) { throw new IllegalArgumentException("uri"); } if (headers == null) { throw new IllegalArgumentException("headers"); } PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); getResourceTypeAndIdOrFullName(uri, pathInfo); return generateKeyAuthorizationSignatureNew(verb, pathInfo.resourceIdOrFullName, pathInfo.resourcePath, headers); } private String 
generateKeyAuthorizationSignatureNew(String verb, String resourceIdValue, String resourceType, Map<String, String> headers) { if (StringUtils.isEmpty(verb)) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "verb")); } if (resourceType == null) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "resourceType")); } if (headers == null) { throw new IllegalArgumentException("headers"); } String authResourceId = getAuthorizationResourceIdOrFullName(resourceType, resourceIdValue); String payLoad = generateMessagePayload(verb, authResourceId, resourceType, headers); Mac mac = this.getMacInstance(); byte[] digest = mac.doFinal(payLoad.getBytes()); String authorizationToken = Utils.encodeBase64String(digest); String authtoken = AUTH_PREFIX + authorizationToken; return HttpUtils.urlEncode(authtoken); } private String generateMessagePayload(String verb, String resourceId, String resourceType, Map<String, String> headers) { String xDate = headers.get(HttpConstants.HttpHeaders.X_DATE); String date = headers.get(HttpConstants.HttpHeaders.HTTP_DATE); if (StringUtils.isEmpty(xDate) && (StringUtils.isEmpty(date) || StringUtils.isWhitespace(date))) { headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); xDate = Utils.nowAsRFC1123(); } if (!PathsHelper.isNameBased(resourceId)) { resourceId = resourceId.toLowerCase(); } StringBuilder payload = new StringBuilder(); payload.append(verb.toLowerCase()) .append('\n') .append(resourceType.toLowerCase()) .append('\n') .append(resourceId) .append('\n') .append(xDate.toLowerCase()) .append('\n') .append(StringUtils.isEmpty(xDate) ? 
date.toLowerCase() : "") .append('\n'); return payload.toString(); } private String getAuthorizationResourceIdOrFullName(String resourceType, String resourceIdOrFullName) { if (StringUtils.isEmpty(resourceType) || StringUtils.isEmpty(resourceIdOrFullName)) { return resourceIdOrFullName; } if (PathsHelper.isNameBased(resourceIdOrFullName)) { return resourceIdOrFullName; } if (resourceType.equalsIgnoreCase(Paths.OFFERS_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.PARTITIONS_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.TOPOLOGY_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.RID_RANGE_PATH_SEGMENT)) { return resourceIdOrFullName; } ResourceId parsedRId = ResourceId.parse(resourceIdOrFullName); if (resourceType.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) { return parsedRId.getDatabaseId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.USERS_PATH_SEGMENT)) { return parsedRId.getUserId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.COLLECTIONS_PATH_SEGMENT)) { return parsedRId.getDocumentCollectionId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.DOCUMENTS_PATH_SEGMENT)) { return parsedRId.getDocumentId().toString(); } else { return resourceIdOrFullName; } } private void getResourceTypeAndIdOrFullName(URI uri, PathInfo pathInfo) { if (uri == null) { throw new IllegalArgumentException("uri"); } pathInfo.resourcePath = StringUtils.EMPTY; pathInfo.resourceIdOrFullName = StringUtils.EMPTY; String[] segments = StringUtils.split(uri.toString(), Constants.Properties.PATH_SEPARATOR); if (segments == null || segments.length < 1) { throw new IllegalArgumentException(RMResources.InvalidUrl); } String pathAndQuery = StringUtils.EMPTY ; if(StringUtils.isNotEmpty(uri.getPath())) { pathAndQuery+= uri.getPath(); } if(StringUtils.isNotEmpty(uri.getQuery())) { pathAndQuery+="?"; pathAndQuery+= uri.getQuery(); } if (!PathsHelper.tryParsePathSegments(pathAndQuery, pathInfo, null)) { pathInfo.resourcePath = StringUtils.EMPTY; 
pathInfo.resourceIdOrFullName = StringUtils.EMPTY; } } }
class BaseAuthorizationTokenProvider implements AuthorizationTokenProvider { private static final String AUTH_PREFIX = "type=master&ver=1.0&sig="; private final CosmosKeyCredential cosmosKeyCredential; private final Mac macInstance; private int masterKeyHashCode; public BaseAuthorizationTokenProvider(CosmosKeyCredential cosmosKeyCredential) { this.cosmosKeyCredential = cosmosKeyCredential; this.macInstance = getMacInstance(); } private static String getResourceSegment(ResourceType resourceType) { switch (resourceType) { case Attachment: return Paths.ATTACHMENTS_PATH_SEGMENT; case Database: return Paths.DATABASES_PATH_SEGMENT; case Conflict: return Paths.CONFLICTS_PATH_SEGMENT; case Document: return Paths.DOCUMENTS_PATH_SEGMENT; case DocumentCollection: return Paths.COLLECTIONS_PATH_SEGMENT; case Offer: return Paths.OFFERS_PATH_SEGMENT; case Permission: return Paths.PERMISSIONS_PATH_SEGMENT; case StoredProcedure: return Paths.STORED_PROCEDURES_PATH_SEGMENT; case Trigger: return Paths.TRIGGERS_PATH_SEGMENT; case UserDefinedFunction: return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; case User: return Paths.USERS_PATH_SEGMENT; case PartitionKeyRange: return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; case Media: return Paths.MEDIA_PATH_SEGMENT; case DatabaseAccount: return ""; default: return null; } } /** * This API is a helper method to create auth header based on client request using masterkey. * * @param verb the verb. * @param resourceIdOrFullName the resource id or full name * @param resourceType the resource type. * @param headers the request headers. * @return the key authorization signature. 
*/ public String generateKeyAuthorizationSignature(String verb, String resourceIdOrFullName, ResourceType resourceType, Map<String, String> headers) { return this.generateKeyAuthorizationSignature(verb, resourceIdOrFullName, BaseAuthorizationTokenProvider.getResourceSegment(resourceType).toLowerCase(), headers); } /** * This API is a helper method to create auth header based on client request using masterkey. * * @param verb the verb * @param resourceIdOrFullName the resource id or full name * @param resourceSegment the resource segment * @param headers the request headers * @return the key authorization signature */ public String generateKeyAuthorizationSignature(String verb, String resourceIdOrFullName, String resourceSegment, Map<String, String> headers) { if (verb == null || verb.isEmpty()) { throw new IllegalArgumentException("verb"); } if (resourceIdOrFullName == null) { resourceIdOrFullName = ""; } if (resourceSegment == null) { throw new IllegalArgumentException("resourceSegment"); } if (headers == null) { throw new IllegalArgumentException("headers"); } if (StringUtils.isEmpty(this.cosmosKeyCredential.key())) { throw new IllegalArgumentException("key credentials cannot be empty"); } if(!PathsHelper.isNameBased(resourceIdOrFullName)) { resourceIdOrFullName = resourceIdOrFullName.toLowerCase(Locale.ROOT); } StringBuilder body = new StringBuilder(); body.append(verb.toLowerCase()) .append('\n') .append(resourceSegment) .append('\n') .append(resourceIdOrFullName) .append('\n'); if (headers.containsKey(HttpConstants.HttpHeaders.X_DATE)) { body.append(headers.get(HttpConstants.HttpHeaders.X_DATE).toLowerCase()); } body.append('\n'); if (headers.containsKey(HttpConstants.HttpHeaders.HTTP_DATE)) { body.append(headers.get(HttpConstants.HttpHeaders.HTTP_DATE).toLowerCase()); } body.append('\n'); Mac mac = getMacInstance(); byte[] digest = mac.doFinal(body.toString().getBytes()); String auth = Utils.encodeBase64String(digest); return AUTH_PREFIX + auth; } /** * This 
API is a helper method to create auth header based on client request using resourceTokens. * * @param resourceTokens the resource tokens. * @param path the path. * @param resourceId the resource id. * @return the authorization token. */ public String getAuthorizationTokenUsingResourceTokens(Map<String, String> resourceTokens, String path, String resourceId) { if (resourceTokens == null) { throw new IllegalArgumentException("resourceTokens"); } String resourceToken = null; if (resourceTokens.containsKey(resourceId) && resourceTokens.get(resourceId) != null) { resourceToken = resourceTokens.get(resourceId); } else if (StringUtils.isEmpty(path) || StringUtils.isEmpty(resourceId)) { if (resourceTokens.size() > 0) { resourceToken = resourceTokens.values().iterator().next(); } } else { String[] pathParts = StringUtils.split(path, "/"); String[] resourceTypes = {"dbs", "colls", "docs", "sprocs", "udfs", "triggers", "users", "permissions", "attachments", "media", "conflicts"}; HashSet<String> resourceTypesSet = new HashSet<String>(); Collections.addAll(resourceTypesSet, resourceTypes); for (int i = pathParts.length - 1; i >= 0; --i) { if (!resourceTypesSet.contains(pathParts[i]) && resourceTokens.containsKey(pathParts[i])) { resourceToken = resourceTokens.get(pathParts[i]); } } } return resourceToken; } public String generateKeyAuthorizationSignature(String verb, URI uri, Map<String, String> headers) { if (StringUtils.isEmpty(verb)) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "verb")); } if (uri == null) { throw new IllegalArgumentException("uri"); } if (headers == null) { throw new IllegalArgumentException("headers"); } PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); getResourceTypeAndIdOrFullName(uri, pathInfo); return generateKeyAuthorizationSignatureNew(verb, pathInfo.resourceIdOrFullName, pathInfo.resourcePath, headers); } private String generateKeyAuthorizationSignatureNew(String 
verb, String resourceIdValue, String resourceType, Map<String, String> headers) { if (StringUtils.isEmpty(verb)) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "verb")); } if (resourceType == null) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "resourceType")); } if (headers == null) { throw new IllegalArgumentException("headers"); } String authResourceId = getAuthorizationResourceIdOrFullName(resourceType, resourceIdValue); String payLoad = generateMessagePayload(verb, authResourceId, resourceType, headers); Mac mac = this.getMacInstance(); byte[] digest = mac.doFinal(payLoad.getBytes()); String authorizationToken = Utils.encodeBase64String(digest); String authtoken = AUTH_PREFIX + authorizationToken; return HttpUtils.urlEncode(authtoken); } private String generateMessagePayload(String verb, String resourceId, String resourceType, Map<String, String> headers) { String xDate = headers.get(HttpConstants.HttpHeaders.X_DATE); String date = headers.get(HttpConstants.HttpHeaders.HTTP_DATE); if (StringUtils.isEmpty(xDate) && (StringUtils.isEmpty(date) || StringUtils.isWhitespace(date))) { headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); xDate = Utils.nowAsRFC1123(); } if (!PathsHelper.isNameBased(resourceId)) { resourceId = resourceId.toLowerCase(); } StringBuilder payload = new StringBuilder(); payload.append(verb.toLowerCase()) .append('\n') .append(resourceType.toLowerCase()) .append('\n') .append(resourceId) .append('\n') .append(xDate.toLowerCase()) .append('\n') .append(StringUtils.isEmpty(xDate) ? 
date.toLowerCase() : "") .append('\n'); return payload.toString(); } private String getAuthorizationResourceIdOrFullName(String resourceType, String resourceIdOrFullName) { if (StringUtils.isEmpty(resourceType) || StringUtils.isEmpty(resourceIdOrFullName)) { return resourceIdOrFullName; } if (PathsHelper.isNameBased(resourceIdOrFullName)) { return resourceIdOrFullName; } if (resourceType.equalsIgnoreCase(Paths.OFFERS_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.PARTITIONS_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.TOPOLOGY_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.RID_RANGE_PATH_SEGMENT)) { return resourceIdOrFullName; } ResourceId parsedRId = ResourceId.parse(resourceIdOrFullName); if (resourceType.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) { return parsedRId.getDatabaseId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.USERS_PATH_SEGMENT)) { return parsedRId.getUserId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.COLLECTIONS_PATH_SEGMENT)) { return parsedRId.getDocumentCollectionId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.DOCUMENTS_PATH_SEGMENT)) { return parsedRId.getDocumentId().toString(); } else { return resourceIdOrFullName; } } private void getResourceTypeAndIdOrFullName(URI uri, PathInfo pathInfo) { if (uri == null) { throw new IllegalArgumentException("uri"); } pathInfo.resourcePath = StringUtils.EMPTY; pathInfo.resourceIdOrFullName = StringUtils.EMPTY; String[] segments = StringUtils.split(uri.toString(), Constants.Properties.PATH_SEPARATOR); if (segments == null || segments.length < 1) { throw new IllegalArgumentException(RMResources.InvalidUrl); } String pathAndQuery = StringUtils.EMPTY ; if(StringUtils.isNotEmpty(uri.getPath())) { pathAndQuery+= uri.getPath(); } if(StringUtils.isNotEmpty(uri.getQuery())) { pathAndQuery+="?"; pathAndQuery+= uri.getQuery(); } if (!PathsHelper.tryParsePathSegments(pathAndQuery, pathInfo, null)) { pathInfo.resourcePath = StringUtils.EMPTY; 
pathInfo.resourceIdOrFullName = StringUtils.EMPTY; } } }
Should be at one place, but there are lot of references in the code, where AsyncDocumentClient.build() is being used directly without CosmosClientBuilder, so didn't want to take chances and leave this check.
public AsyncDocumentClient build() { ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot build client without service endpoint"); ifThrowIllegalArgException( this.masterKeyOrResourceToken == null && (permissionFeed == null || permissionFeed.isEmpty()) && this.tokenResolver == null && this.cosmosKeyCredential == null, "cannot build client without any one of masterKey, " + "resource token, permissionFeed, tokenResolver and cosmos key credential"); ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getMasterKey()), "cannot build client without key credential"); RxDocumentClientImpl client = new RxDocumentClientImpl(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, desiredConsistencyLevel, configs, tokenResolver, cosmosKeyCredential); client.init(); return client; }
this.masterKeyOrResourceToken == null && (permissionFeed == null || permissionFeed.isEmpty())
public AsyncDocumentClient build() { ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot build client without service endpoint"); ifThrowIllegalArgException( this.masterKeyOrResourceToken == null && (permissionFeed == null || permissionFeed.isEmpty()) && this.tokenResolver == null && this.cosmosKeyCredential == null, "cannot build client without any one of masterKey, " + "resource token, permissionFeed, tokenResolver and cosmos key credential"); ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.key()), "cannot build client without key credential"); RxDocumentClientImpl client = new RxDocumentClientImpl(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, desiredConsistencyLevel, configs, tokenResolver, cosmosKeyCredential); client.init(); return client; }
class Builder { Configs configs = new Configs(); ConnectionPolicy connectionPolicy; ConsistencyLevel desiredConsistencyLevel; List<Permission> permissionFeed; String masterKeyOrResourceToken; URI serviceEndpoint; TokenResolver tokenResolver; CosmosKeyCredential cosmosKeyCredential; public Builder withServiceEndpoint(String serviceEndpoint) { try { this.serviceEndpoint = new URI(serviceEndpoint); } catch (URISyntaxException e) { throw new IllegalArgumentException(e.getMessage()); } return this; } /** * New method withMasterKeyOrResourceToken will take either master key or resource token * and perform authentication for accessing resource. * * @param masterKeyOrResourceToken MasterKey or resourceToken for authentication. * @return current Builder. * @deprecated use {@link */ @Deprecated public Builder withMasterKey(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; return this; } /** * This method will accept the master key , additionally it can also consume * resource token too for authentication. * * @param masterKeyOrResourceToken MasterKey or resourceToken for authentication. * @return current Builder. */ public Builder withMasterKeyOrResourceToken(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; return this; } /** * This method will accept the permission list , which contains the * resource tokens needed to access resources. * * @param permissionFeed Permission list for authentication. * @return current Builder. 
*/ public Builder withPermissionFeed(List<Permission> permissionFeed) { this.permissionFeed = permissionFeed; return this; } public Builder withConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; return this; } public Builder withConfigs(Configs configs) { this.configs = configs; return this; } public Builder withConnectionPolicy(ConnectionPolicy connectionPolicy) { this.connectionPolicy = connectionPolicy; return this; } public Builder withCosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) { if (cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getMasterKey())) { throw new IllegalArgumentException("Cannot build client with empty key credential"); } this.cosmosKeyCredential = cosmosKeyCredential; return this; } /** * This method will accept functional interface TokenResolver which helps in generation authorization * token per request. AsyncDocumentClient can be successfully initialized with this API without passing any MasterKey, ResourceToken or PermissionFeed. * @param tokenResolver The tokenResolver * @return current Builder. 
*/ public Builder withTokenResolver(TokenResolver tokenResolver) { this.tokenResolver = tokenResolver; return this; } private void ifThrowIllegalArgException(boolean value, String error) { if (value) { throw new IllegalArgumentException(error); } } public Configs getConfigs() { return configs; } public void setConfigs(Configs configs) { this.configs = configs; } public ConnectionPolicy getConnectionPolicy() { return connectionPolicy; } public void setConnectionPolicy(ConnectionPolicy connectionPolicy) { this.connectionPolicy = connectionPolicy; } public ConsistencyLevel getDesiredConsistencyLevel() { return desiredConsistencyLevel; } public void setDesiredConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; } public List<Permission> getPermissionFeed() { return permissionFeed; } public void setPermissionFeed(List<Permission> permissionFeed) { this.permissionFeed = permissionFeed; } public String getMasterKeyOrResourceToken() { return masterKeyOrResourceToken; } public void setMasterKeyOrResourceToken(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; } public URI getServiceEndpoint() { return serviceEndpoint; } public void setServiceEndpoint(URI serviceEndpoint) { this.serviceEndpoint = serviceEndpoint; } public TokenResolver getTokenResolver() { return tokenResolver; } public void setTokenResolver(TokenResolver tokenResolver) { this.tokenResolver = tokenResolver; } public CosmosKeyCredential getCosmosKeyCredential() { return cosmosKeyCredential; } public void setCosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) { this.cosmosKeyCredential = cosmosKeyCredential; } }
class Builder { Configs configs = new Configs(); ConnectionPolicy connectionPolicy; ConsistencyLevel desiredConsistencyLevel; List<Permission> permissionFeed; String masterKeyOrResourceToken; URI serviceEndpoint; TokenResolver tokenResolver; CosmosKeyCredential cosmosKeyCredential; public Builder withServiceEndpoint(String serviceEndpoint) { try { this.serviceEndpoint = new URI(serviceEndpoint); } catch (URISyntaxException e) { throw new IllegalArgumentException(e.getMessage()); } return this; } /** * New method withMasterKeyOrResourceToken will take either master key or resource token * and perform authentication for accessing resource. * * @param masterKeyOrResourceToken MasterKey or resourceToken for authentication. * @return current Builder. * @deprecated use {@link */ @Deprecated public Builder withMasterKey(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; return this; } /** * This method will accept the master key , additionally it can also consume * resource token too for authentication. * * @param masterKeyOrResourceToken MasterKey or resourceToken for authentication. * @return current Builder. */ public Builder withMasterKeyOrResourceToken(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; return this; } /** * This method will accept the permission list , which contains the * resource tokens needed to access resources. * * @param permissionFeed Permission list for authentication. * @return current Builder. 
*/ public Builder withPermissionFeed(List<Permission> permissionFeed) { this.permissionFeed = permissionFeed; return this; } public Builder withConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; return this; } public Builder withConfigs(Configs configs) { this.configs = configs; return this; } public Builder withConnectionPolicy(ConnectionPolicy connectionPolicy) { this.connectionPolicy = connectionPolicy; return this; } public Builder withCosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) { if (cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.key())) { throw new IllegalArgumentException("Cannot build client with empty key credential"); } this.cosmosKeyCredential = cosmosKeyCredential; return this; } /** * This method will accept functional interface TokenResolver which helps in generation authorization * token per request. AsyncDocumentClient can be successfully initialized with this API without passing any MasterKey, ResourceToken or PermissionFeed. * @param tokenResolver The tokenResolver * @return current Builder. 
*/ public Builder withTokenResolver(TokenResolver tokenResolver) { this.tokenResolver = tokenResolver; return this; } private void ifThrowIllegalArgException(boolean value, String error) { if (value) { throw new IllegalArgumentException(error); } } public Configs getConfigs() { return configs; } public void setConfigs(Configs configs) { this.configs = configs; } public ConnectionPolicy getConnectionPolicy() { return connectionPolicy; } public void setConnectionPolicy(ConnectionPolicy connectionPolicy) { this.connectionPolicy = connectionPolicy; } public ConsistencyLevel getDesiredConsistencyLevel() { return desiredConsistencyLevel; } public void setDesiredConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; } public List<Permission> getPermissionFeed() { return permissionFeed; } public void setPermissionFeed(List<Permission> permissionFeed) { this.permissionFeed = permissionFeed; } public String getMasterKeyOrResourceToken() { return masterKeyOrResourceToken; } public void setMasterKeyOrResourceToken(String masterKeyOrResourceToken) { this.masterKeyOrResourceToken = masterKeyOrResourceToken; } public URI getServiceEndpoint() { return serviceEndpoint; } public void setServiceEndpoint(URI serviceEndpoint) { this.serviceEndpoint = serviceEndpoint; } public TokenResolver getTokenResolver() { return tokenResolver; } public void setTokenResolver(TokenResolver tokenResolver) { this.tokenResolver = tokenResolver; } public CosmosKeyCredential getCosmosKeyCredential() { return cosmosKeyCredential; } }
Good point, but the reason for hashCode comparison was because of the fact that we don't want to store masterKey in BaseAuthorizationTokenProvider, because we always want to use cosmosKeyCredential reference to get the master key. But as you mentioned, string comparison is faster, I can store the master key and won't use it for any other thing than comparison. Makes sense ?
private Mac getMacInstance() { int masterKeyLatestHashCode = this.cosmosKeyCredential.getMasterKey().hashCode(); if (masterKeyLatestHashCode != this.masterKeyHashCode) { byte[] masterKeyBytes = this.cosmosKeyCredential.getMasterKey().getBytes(); byte[] masterKeyDecodedBytes = Utils.Base64Decoder.decode(masterKeyBytes); SecretKey signingKey = new SecretKeySpec(masterKeyDecodedBytes, "HMACSHA256"); try { Mac macInstance = Mac.getInstance("HMACSHA256"); macInstance.init(signingKey); this.masterKeyHashCode = masterKeyLatestHashCode; return macInstance; } catch (NoSuchAlgorithmException | InvalidKeyException e) { throw new IllegalStateException(e); } } else { try { return (Mac)this.macInstance.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException(e); } } }
int masterKeyLatestHashCode = this.cosmosKeyCredential.getMasterKey().hashCode();
private Mac getMacInstance() { int masterKeyLatestHashCode = this.cosmosKeyCredential.keyHashCode(); if (masterKeyLatestHashCode != this.masterKeyHashCode) { byte[] masterKeyBytes = this.cosmosKeyCredential.key().getBytes(); byte[] masterKeyDecodedBytes = Utils.Base64Decoder.decode(masterKeyBytes); SecretKey signingKey = new SecretKeySpec(masterKeyDecodedBytes, "HMACSHA256"); try { Mac macInstance = Mac.getInstance("HMACSHA256"); macInstance.init(signingKey); this.masterKeyHashCode = masterKeyLatestHashCode; return macInstance; } catch (NoSuchAlgorithmException | InvalidKeyException e) { throw new IllegalStateException(e); } } else { try { return (Mac)this.macInstance.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException(e); } } }
class BaseAuthorizationTokenProvider implements AuthorizationTokenProvider { private static final String AUTH_PREFIX = "type=master&ver=1.0&sig="; private final CosmosKeyCredential cosmosKeyCredential; private Mac macInstance; private int masterKeyHashCode; public BaseAuthorizationTokenProvider(CosmosKeyCredential cosmosKeyCredential) { this.cosmosKeyCredential = cosmosKeyCredential; this.macInstance = getMacInstance(); } private static String getResourceSegment(ResourceType resourceType) { switch (resourceType) { case Attachment: return Paths.ATTACHMENTS_PATH_SEGMENT; case Database: return Paths.DATABASES_PATH_SEGMENT; case Conflict: return Paths.CONFLICTS_PATH_SEGMENT; case Document: return Paths.DOCUMENTS_PATH_SEGMENT; case DocumentCollection: return Paths.COLLECTIONS_PATH_SEGMENT; case Offer: return Paths.OFFERS_PATH_SEGMENT; case Permission: return Paths.PERMISSIONS_PATH_SEGMENT; case StoredProcedure: return Paths.STORED_PROCEDURES_PATH_SEGMENT; case Trigger: return Paths.TRIGGERS_PATH_SEGMENT; case UserDefinedFunction: return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; case User: return Paths.USERS_PATH_SEGMENT; case PartitionKeyRange: return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; case Media: return Paths.MEDIA_PATH_SEGMENT; case DatabaseAccount: return ""; default: return null; } } /** * This API is a helper method to create auth header based on client request using masterkey. * * @param verb the verb. * @param resourceIdOrFullName the resource id or full name * @param resourceType the resource type. * @param headers the request headers. * @return the key authorization signature. 
*/ public String generateKeyAuthorizationSignature(String verb, String resourceIdOrFullName, ResourceType resourceType, Map<String, String> headers) { return this.generateKeyAuthorizationSignature(verb, resourceIdOrFullName, BaseAuthorizationTokenProvider.getResourceSegment(resourceType).toLowerCase(), headers); } /** * This API is a helper method to create auth header based on client request using masterkey. * * @param verb the verb * @param resourceIdOrFullName the resource id or full name * @param resourceSegment the resource segment * @param headers the request headers * @return the key authorization signature */ public String generateKeyAuthorizationSignature(String verb, String resourceIdOrFullName, String resourceSegment, Map<String, String> headers) { if (verb == null || verb.isEmpty()) { throw new IllegalArgumentException("verb"); } if (resourceIdOrFullName == null) { resourceIdOrFullName = ""; } if (resourceSegment == null) { throw new IllegalArgumentException("resourceSegment"); } if (headers == null) { throw new IllegalArgumentException("headers"); } if (StringUtils.isEmpty(this.cosmosKeyCredential.getMasterKey())) { throw new IllegalArgumentException("key credentials cannot be empty"); } if(!PathsHelper.isNameBased(resourceIdOrFullName)) { resourceIdOrFullName = resourceIdOrFullName.toLowerCase(Locale.ROOT); } StringBuilder body = new StringBuilder(); body.append(verb.toLowerCase()) .append('\n') .append(resourceSegment) .append('\n') .append(resourceIdOrFullName) .append('\n'); if (headers.containsKey(HttpConstants.HttpHeaders.X_DATE)) { body.append(headers.get(HttpConstants.HttpHeaders.X_DATE).toLowerCase()); } body.append('\n'); if (headers.containsKey(HttpConstants.HttpHeaders.HTTP_DATE)) { body.append(headers.get(HttpConstants.HttpHeaders.HTTP_DATE).toLowerCase()); } body.append('\n'); Mac mac = getMacInstance(); byte[] digest = mac.doFinal(body.toString().getBytes()); String auth = Utils.encodeBase64String(digest); return AUTH_PREFIX + auth; } 
/** * This API is a helper method to create auth header based on client request using resourceTokens. * * @param resourceTokens the resource tokens. * @param path the path. * @param resourceId the resource id. * @return the authorization token. */ public String getAuthorizationTokenUsingResourceTokens(Map<String, String> resourceTokens, String path, String resourceId) { if (resourceTokens == null) { throw new IllegalArgumentException("resourceTokens"); } String resourceToken = null; if (resourceTokens.containsKey(resourceId) && resourceTokens.get(resourceId) != null) { resourceToken = resourceTokens.get(resourceId); } else if (StringUtils.isEmpty(path) || StringUtils.isEmpty(resourceId)) { if (resourceTokens.size() > 0) { resourceToken = resourceTokens.values().iterator().next(); } } else { String[] pathParts = StringUtils.split(path, "/"); String[] resourceTypes = {"dbs", "colls", "docs", "sprocs", "udfs", "triggers", "users", "permissions", "attachments", "media", "conflicts"}; HashSet<String> resourceTypesSet = new HashSet<String>(); Collections.addAll(resourceTypesSet, resourceTypes); for (int i = pathParts.length - 1; i >= 0; --i) { if (!resourceTypesSet.contains(pathParts[i]) && resourceTokens.containsKey(pathParts[i])) { resourceToken = resourceTokens.get(pathParts[i]); } } } return resourceToken; } public String generateKeyAuthorizationSignature(String verb, URI uri, Map<String, String> headers) { if (StringUtils.isEmpty(verb)) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "verb")); } if (uri == null) { throw new IllegalArgumentException("uri"); } if (headers == null) { throw new IllegalArgumentException("headers"); } PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); getResourceTypeAndIdOrFullName(uri, pathInfo); return generateKeyAuthorizationSignatureNew(verb, pathInfo.resourceIdOrFullName, pathInfo.resourcePath, headers); } private String 
generateKeyAuthorizationSignatureNew(String verb, String resourceIdValue, String resourceType, Map<String, String> headers) { if (StringUtils.isEmpty(verb)) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "verb")); } if (resourceType == null) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "resourceType")); } if (headers == null) { throw new IllegalArgumentException("headers"); } String authResourceId = getAuthorizationResourceIdOrFullName(resourceType, resourceIdValue); String payLoad = generateMessagePayload(verb, authResourceId, resourceType, headers); Mac mac = this.getMacInstance(); byte[] digest = mac.doFinal(payLoad.getBytes()); String authorizationToken = Utils.encodeBase64String(digest); String authtoken = AUTH_PREFIX + authorizationToken; return HttpUtils.urlEncode(authtoken); } private String generateMessagePayload(String verb, String resourceId, String resourceType, Map<String, String> headers) { String xDate = headers.get(HttpConstants.HttpHeaders.X_DATE); String date = headers.get(HttpConstants.HttpHeaders.HTTP_DATE); if (StringUtils.isEmpty(xDate) && (StringUtils.isEmpty(date) || StringUtils.isWhitespace(date))) { headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); xDate = Utils.nowAsRFC1123(); } if (!PathsHelper.isNameBased(resourceId)) { resourceId = resourceId.toLowerCase(); } StringBuilder payload = new StringBuilder(); payload.append(verb.toLowerCase()) .append('\n') .append(resourceType.toLowerCase()) .append('\n') .append(resourceId) .append('\n') .append(xDate.toLowerCase()) .append('\n') .append(StringUtils.isEmpty(xDate) ? 
date.toLowerCase() : "") .append('\n'); return payload.toString(); } private String getAuthorizationResourceIdOrFullName(String resourceType, String resourceIdOrFullName) { if (StringUtils.isEmpty(resourceType) || StringUtils.isEmpty(resourceIdOrFullName)) { return resourceIdOrFullName; } if (PathsHelper.isNameBased(resourceIdOrFullName)) { return resourceIdOrFullName; } if (resourceType.equalsIgnoreCase(Paths.OFFERS_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.PARTITIONS_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.TOPOLOGY_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.RID_RANGE_PATH_SEGMENT)) { return resourceIdOrFullName; } ResourceId parsedRId = ResourceId.parse(resourceIdOrFullName); if (resourceType.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) { return parsedRId.getDatabaseId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.USERS_PATH_SEGMENT)) { return parsedRId.getUserId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.COLLECTIONS_PATH_SEGMENT)) { return parsedRId.getDocumentCollectionId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.DOCUMENTS_PATH_SEGMENT)) { return parsedRId.getDocumentId().toString(); } else { return resourceIdOrFullName; } } private void getResourceTypeAndIdOrFullName(URI uri, PathInfo pathInfo) { if (uri == null) { throw new IllegalArgumentException("uri"); } pathInfo.resourcePath = StringUtils.EMPTY; pathInfo.resourceIdOrFullName = StringUtils.EMPTY; String[] segments = StringUtils.split(uri.toString(), Constants.Properties.PATH_SEPARATOR); if (segments == null || segments.length < 1) { throw new IllegalArgumentException(RMResources.InvalidUrl); } String pathAndQuery = StringUtils.EMPTY ; if(StringUtils.isNotEmpty(uri.getPath())) { pathAndQuery+= uri.getPath(); } if(StringUtils.isNotEmpty(uri.getQuery())) { pathAndQuery+="?"; pathAndQuery+= uri.getQuery(); } if (!PathsHelper.tryParsePathSegments(pathAndQuery, pathInfo, null)) { pathInfo.resourcePath = StringUtils.EMPTY; 
pathInfo.resourceIdOrFullName = StringUtils.EMPTY; } } }
class BaseAuthorizationTokenProvider implements AuthorizationTokenProvider { private static final String AUTH_PREFIX = "type=master&ver=1.0&sig="; private final CosmosKeyCredential cosmosKeyCredential; private final Mac macInstance; private int masterKeyHashCode; public BaseAuthorizationTokenProvider(CosmosKeyCredential cosmosKeyCredential) { this.cosmosKeyCredential = cosmosKeyCredential; this.macInstance = getMacInstance(); } private static String getResourceSegment(ResourceType resourceType) { switch (resourceType) { case Attachment: return Paths.ATTACHMENTS_PATH_SEGMENT; case Database: return Paths.DATABASES_PATH_SEGMENT; case Conflict: return Paths.CONFLICTS_PATH_SEGMENT; case Document: return Paths.DOCUMENTS_PATH_SEGMENT; case DocumentCollection: return Paths.COLLECTIONS_PATH_SEGMENT; case Offer: return Paths.OFFERS_PATH_SEGMENT; case Permission: return Paths.PERMISSIONS_PATH_SEGMENT; case StoredProcedure: return Paths.STORED_PROCEDURES_PATH_SEGMENT; case Trigger: return Paths.TRIGGERS_PATH_SEGMENT; case UserDefinedFunction: return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; case User: return Paths.USERS_PATH_SEGMENT; case PartitionKeyRange: return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; case Media: return Paths.MEDIA_PATH_SEGMENT; case DatabaseAccount: return ""; default: return null; } } /** * This API is a helper method to create auth header based on client request using masterkey. * * @param verb the verb. * @param resourceIdOrFullName the resource id or full name * @param resourceType the resource type. * @param headers the request headers. * @return the key authorization signature. 
*/ public String generateKeyAuthorizationSignature(String verb, String resourceIdOrFullName, ResourceType resourceType, Map<String, String> headers) { return this.generateKeyAuthorizationSignature(verb, resourceIdOrFullName, BaseAuthorizationTokenProvider.getResourceSegment(resourceType).toLowerCase(), headers); } /** * This API is a helper method to create auth header based on client request using masterkey. * * @param verb the verb * @param resourceIdOrFullName the resource id or full name * @param resourceSegment the resource segment * @param headers the request headers * @return the key authorization signature */ public String generateKeyAuthorizationSignature(String verb, String resourceIdOrFullName, String resourceSegment, Map<String, String> headers) { if (verb == null || verb.isEmpty()) { throw new IllegalArgumentException("verb"); } if (resourceIdOrFullName == null) { resourceIdOrFullName = ""; } if (resourceSegment == null) { throw new IllegalArgumentException("resourceSegment"); } if (headers == null) { throw new IllegalArgumentException("headers"); } if (StringUtils.isEmpty(this.cosmosKeyCredential.key())) { throw new IllegalArgumentException("key credentials cannot be empty"); } if(!PathsHelper.isNameBased(resourceIdOrFullName)) { resourceIdOrFullName = resourceIdOrFullName.toLowerCase(Locale.ROOT); } StringBuilder body = new StringBuilder(); body.append(verb.toLowerCase()) .append('\n') .append(resourceSegment) .append('\n') .append(resourceIdOrFullName) .append('\n'); if (headers.containsKey(HttpConstants.HttpHeaders.X_DATE)) { body.append(headers.get(HttpConstants.HttpHeaders.X_DATE).toLowerCase()); } body.append('\n'); if (headers.containsKey(HttpConstants.HttpHeaders.HTTP_DATE)) { body.append(headers.get(HttpConstants.HttpHeaders.HTTP_DATE).toLowerCase()); } body.append('\n'); Mac mac = getMacInstance(); byte[] digest = mac.doFinal(body.toString().getBytes()); String auth = Utils.encodeBase64String(digest); return AUTH_PREFIX + auth; } /** * This 
API is a helper method to create auth header based on client request using resourceTokens. * * @param resourceTokens the resource tokens. * @param path the path. * @param resourceId the resource id. * @return the authorization token. */ public String getAuthorizationTokenUsingResourceTokens(Map<String, String> resourceTokens, String path, String resourceId) { if (resourceTokens == null) { throw new IllegalArgumentException("resourceTokens"); } String resourceToken = null; if (resourceTokens.containsKey(resourceId) && resourceTokens.get(resourceId) != null) { resourceToken = resourceTokens.get(resourceId); } else if (StringUtils.isEmpty(path) || StringUtils.isEmpty(resourceId)) { if (resourceTokens.size() > 0) { resourceToken = resourceTokens.values().iterator().next(); } } else { String[] pathParts = StringUtils.split(path, "/"); String[] resourceTypes = {"dbs", "colls", "docs", "sprocs", "udfs", "triggers", "users", "permissions", "attachments", "media", "conflicts"}; HashSet<String> resourceTypesSet = new HashSet<String>(); Collections.addAll(resourceTypesSet, resourceTypes); for (int i = pathParts.length - 1; i >= 0; --i) { if (!resourceTypesSet.contains(pathParts[i]) && resourceTokens.containsKey(pathParts[i])) { resourceToken = resourceTokens.get(pathParts[i]); } } } return resourceToken; } public String generateKeyAuthorizationSignature(String verb, URI uri, Map<String, String> headers) { if (StringUtils.isEmpty(verb)) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "verb")); } if (uri == null) { throw new IllegalArgumentException("uri"); } if (headers == null) { throw new IllegalArgumentException("headers"); } PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); getResourceTypeAndIdOrFullName(uri, pathInfo); return generateKeyAuthorizationSignatureNew(verb, pathInfo.resourceIdOrFullName, pathInfo.resourcePath, headers); } private String generateKeyAuthorizationSignatureNew(String 
verb, String resourceIdValue, String resourceType, Map<String, String> headers) { if (StringUtils.isEmpty(verb)) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "verb")); } if (resourceType == null) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "resourceType")); } if (headers == null) { throw new IllegalArgumentException("headers"); } String authResourceId = getAuthorizationResourceIdOrFullName(resourceType, resourceIdValue); String payLoad = generateMessagePayload(verb, authResourceId, resourceType, headers); Mac mac = this.getMacInstance(); byte[] digest = mac.doFinal(payLoad.getBytes()); String authorizationToken = Utils.encodeBase64String(digest); String authtoken = AUTH_PREFIX + authorizationToken; return HttpUtils.urlEncode(authtoken); } private String generateMessagePayload(String verb, String resourceId, String resourceType, Map<String, String> headers) { String xDate = headers.get(HttpConstants.HttpHeaders.X_DATE); String date = headers.get(HttpConstants.HttpHeaders.HTTP_DATE); if (StringUtils.isEmpty(xDate) && (StringUtils.isEmpty(date) || StringUtils.isWhitespace(date))) { headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); xDate = Utils.nowAsRFC1123(); } if (!PathsHelper.isNameBased(resourceId)) { resourceId = resourceId.toLowerCase(); } StringBuilder payload = new StringBuilder(); payload.append(verb.toLowerCase()) .append('\n') .append(resourceType.toLowerCase()) .append('\n') .append(resourceId) .append('\n') .append(xDate.toLowerCase()) .append('\n') .append(StringUtils.isEmpty(xDate) ? 
date.toLowerCase() : "") .append('\n'); return payload.toString(); } private String getAuthorizationResourceIdOrFullName(String resourceType, String resourceIdOrFullName) { if (StringUtils.isEmpty(resourceType) || StringUtils.isEmpty(resourceIdOrFullName)) { return resourceIdOrFullName; } if (PathsHelper.isNameBased(resourceIdOrFullName)) { return resourceIdOrFullName; } if (resourceType.equalsIgnoreCase(Paths.OFFERS_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.PARTITIONS_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.TOPOLOGY_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.RID_RANGE_PATH_SEGMENT)) { return resourceIdOrFullName; } ResourceId parsedRId = ResourceId.parse(resourceIdOrFullName); if (resourceType.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) { return parsedRId.getDatabaseId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.USERS_PATH_SEGMENT)) { return parsedRId.getUserId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.COLLECTIONS_PATH_SEGMENT)) { return parsedRId.getDocumentCollectionId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.DOCUMENTS_PATH_SEGMENT)) { return parsedRId.getDocumentId().toString(); } else { return resourceIdOrFullName; } } private void getResourceTypeAndIdOrFullName(URI uri, PathInfo pathInfo) { if (uri == null) { throw new IllegalArgumentException("uri"); } pathInfo.resourcePath = StringUtils.EMPTY; pathInfo.resourceIdOrFullName = StringUtils.EMPTY; String[] segments = StringUtils.split(uri.toString(), Constants.Properties.PATH_SEPARATOR); if (segments == null || segments.length < 1) { throw new IllegalArgumentException(RMResources.InvalidUrl); } String pathAndQuery = StringUtils.EMPTY ; if(StringUtils.isNotEmpty(uri.getPath())) { pathAndQuery+= uri.getPath(); } if(StringUtils.isNotEmpty(uri.getQuery())) { pathAndQuery+="?"; pathAndQuery+= uri.getQuery(); } if (!PathsHelper.tryParsePathSegments(pathAndQuery, pathInfo, null)) { pathInfo.resourcePath = StringUtils.EMPTY; 
pathInfo.resourceIdOrFullName = StringUtils.EMPTY; } } }
do no-op on this. Thanks for the explanation.
private Mac getMacInstance() { int masterKeyLatestHashCode = this.cosmosKeyCredential.getMasterKey().hashCode(); if (masterKeyLatestHashCode != this.masterKeyHashCode) { byte[] masterKeyBytes = this.cosmosKeyCredential.getMasterKey().getBytes(); byte[] masterKeyDecodedBytes = Utils.Base64Decoder.decode(masterKeyBytes); SecretKey signingKey = new SecretKeySpec(masterKeyDecodedBytes, "HMACSHA256"); try { Mac macInstance = Mac.getInstance("HMACSHA256"); macInstance.init(signingKey); this.masterKeyHashCode = masterKeyLatestHashCode; return macInstance; } catch (NoSuchAlgorithmException | InvalidKeyException e) { throw new IllegalStateException(e); } } else { try { return (Mac)this.macInstance.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException(e); } } }
int masterKeyLatestHashCode = this.cosmosKeyCredential.getMasterKey().hashCode();
private Mac getMacInstance() { int masterKeyLatestHashCode = this.cosmosKeyCredential.keyHashCode(); if (masterKeyLatestHashCode != this.masterKeyHashCode) { byte[] masterKeyBytes = this.cosmosKeyCredential.key().getBytes(); byte[] masterKeyDecodedBytes = Utils.Base64Decoder.decode(masterKeyBytes); SecretKey signingKey = new SecretKeySpec(masterKeyDecodedBytes, "HMACSHA256"); try { Mac macInstance = Mac.getInstance("HMACSHA256"); macInstance.init(signingKey); this.masterKeyHashCode = masterKeyLatestHashCode; return macInstance; } catch (NoSuchAlgorithmException | InvalidKeyException e) { throw new IllegalStateException(e); } } else { try { return (Mac)this.macInstance.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException(e); } } }
class BaseAuthorizationTokenProvider implements AuthorizationTokenProvider { private static final String AUTH_PREFIX = "type=master&ver=1.0&sig="; private final CosmosKeyCredential cosmosKeyCredential; private Mac macInstance; private int masterKeyHashCode; public BaseAuthorizationTokenProvider(CosmosKeyCredential cosmosKeyCredential) { this.cosmosKeyCredential = cosmosKeyCredential; this.macInstance = getMacInstance(); } private static String getResourceSegment(ResourceType resourceType) { switch (resourceType) { case Attachment: return Paths.ATTACHMENTS_PATH_SEGMENT; case Database: return Paths.DATABASES_PATH_SEGMENT; case Conflict: return Paths.CONFLICTS_PATH_SEGMENT; case Document: return Paths.DOCUMENTS_PATH_SEGMENT; case DocumentCollection: return Paths.COLLECTIONS_PATH_SEGMENT; case Offer: return Paths.OFFERS_PATH_SEGMENT; case Permission: return Paths.PERMISSIONS_PATH_SEGMENT; case StoredProcedure: return Paths.STORED_PROCEDURES_PATH_SEGMENT; case Trigger: return Paths.TRIGGERS_PATH_SEGMENT; case UserDefinedFunction: return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; case User: return Paths.USERS_PATH_SEGMENT; case PartitionKeyRange: return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; case Media: return Paths.MEDIA_PATH_SEGMENT; case DatabaseAccount: return ""; default: return null; } } /** * This API is a helper method to create auth header based on client request using masterkey. * * @param verb the verb. * @param resourceIdOrFullName the resource id or full name * @param resourceType the resource type. * @param headers the request headers. * @return the key authorization signature. 
*/ public String generateKeyAuthorizationSignature(String verb, String resourceIdOrFullName, ResourceType resourceType, Map<String, String> headers) { return this.generateKeyAuthorizationSignature(verb, resourceIdOrFullName, BaseAuthorizationTokenProvider.getResourceSegment(resourceType).toLowerCase(), headers); } /** * This API is a helper method to create auth header based on client request using masterkey. * * @param verb the verb * @param resourceIdOrFullName the resource id or full name * @param resourceSegment the resource segment * @param headers the request headers * @return the key authorization signature */ public String generateKeyAuthorizationSignature(String verb, String resourceIdOrFullName, String resourceSegment, Map<String, String> headers) { if (verb == null || verb.isEmpty()) { throw new IllegalArgumentException("verb"); } if (resourceIdOrFullName == null) { resourceIdOrFullName = ""; } if (resourceSegment == null) { throw new IllegalArgumentException("resourceSegment"); } if (headers == null) { throw new IllegalArgumentException("headers"); } if (StringUtils.isEmpty(this.cosmosKeyCredential.getMasterKey())) { throw new IllegalArgumentException("key credentials cannot be empty"); } if(!PathsHelper.isNameBased(resourceIdOrFullName)) { resourceIdOrFullName = resourceIdOrFullName.toLowerCase(Locale.ROOT); } StringBuilder body = new StringBuilder(); body.append(verb.toLowerCase()) .append('\n') .append(resourceSegment) .append('\n') .append(resourceIdOrFullName) .append('\n'); if (headers.containsKey(HttpConstants.HttpHeaders.X_DATE)) { body.append(headers.get(HttpConstants.HttpHeaders.X_DATE).toLowerCase()); } body.append('\n'); if (headers.containsKey(HttpConstants.HttpHeaders.HTTP_DATE)) { body.append(headers.get(HttpConstants.HttpHeaders.HTTP_DATE).toLowerCase()); } body.append('\n'); Mac mac = getMacInstance(); byte[] digest = mac.doFinal(body.toString().getBytes()); String auth = Utils.encodeBase64String(digest); return AUTH_PREFIX + auth; } 
/** * This API is a helper method to create auth header based on client request using resourceTokens. * * @param resourceTokens the resource tokens. * @param path the path. * @param resourceId the resource id. * @return the authorization token. */ public String getAuthorizationTokenUsingResourceTokens(Map<String, String> resourceTokens, String path, String resourceId) { if (resourceTokens == null) { throw new IllegalArgumentException("resourceTokens"); } String resourceToken = null; if (resourceTokens.containsKey(resourceId) && resourceTokens.get(resourceId) != null) { resourceToken = resourceTokens.get(resourceId); } else if (StringUtils.isEmpty(path) || StringUtils.isEmpty(resourceId)) { if (resourceTokens.size() > 0) { resourceToken = resourceTokens.values().iterator().next(); } } else { String[] pathParts = StringUtils.split(path, "/"); String[] resourceTypes = {"dbs", "colls", "docs", "sprocs", "udfs", "triggers", "users", "permissions", "attachments", "media", "conflicts"}; HashSet<String> resourceTypesSet = new HashSet<String>(); Collections.addAll(resourceTypesSet, resourceTypes); for (int i = pathParts.length - 1; i >= 0; --i) { if (!resourceTypesSet.contains(pathParts[i]) && resourceTokens.containsKey(pathParts[i])) { resourceToken = resourceTokens.get(pathParts[i]); } } } return resourceToken; } public String generateKeyAuthorizationSignature(String verb, URI uri, Map<String, String> headers) { if (StringUtils.isEmpty(verb)) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "verb")); } if (uri == null) { throw new IllegalArgumentException("uri"); } if (headers == null) { throw new IllegalArgumentException("headers"); } PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); getResourceTypeAndIdOrFullName(uri, pathInfo); return generateKeyAuthorizationSignatureNew(verb, pathInfo.resourceIdOrFullName, pathInfo.resourcePath, headers); } private String 
generateKeyAuthorizationSignatureNew(String verb, String resourceIdValue, String resourceType, Map<String, String> headers) { if (StringUtils.isEmpty(verb)) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "verb")); } if (resourceType == null) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "resourceType")); } if (headers == null) { throw new IllegalArgumentException("headers"); } String authResourceId = getAuthorizationResourceIdOrFullName(resourceType, resourceIdValue); String payLoad = generateMessagePayload(verb, authResourceId, resourceType, headers); Mac mac = this.getMacInstance(); byte[] digest = mac.doFinal(payLoad.getBytes()); String authorizationToken = Utils.encodeBase64String(digest); String authtoken = AUTH_PREFIX + authorizationToken; return HttpUtils.urlEncode(authtoken); } private String generateMessagePayload(String verb, String resourceId, String resourceType, Map<String, String> headers) { String xDate = headers.get(HttpConstants.HttpHeaders.X_DATE); String date = headers.get(HttpConstants.HttpHeaders.HTTP_DATE); if (StringUtils.isEmpty(xDate) && (StringUtils.isEmpty(date) || StringUtils.isWhitespace(date))) { headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); xDate = Utils.nowAsRFC1123(); } if (!PathsHelper.isNameBased(resourceId)) { resourceId = resourceId.toLowerCase(); } StringBuilder payload = new StringBuilder(); payload.append(verb.toLowerCase()) .append('\n') .append(resourceType.toLowerCase()) .append('\n') .append(resourceId) .append('\n') .append(xDate.toLowerCase()) .append('\n') .append(StringUtils.isEmpty(xDate) ? 
date.toLowerCase() : "") .append('\n'); return payload.toString(); } private String getAuthorizationResourceIdOrFullName(String resourceType, String resourceIdOrFullName) { if (StringUtils.isEmpty(resourceType) || StringUtils.isEmpty(resourceIdOrFullName)) { return resourceIdOrFullName; } if (PathsHelper.isNameBased(resourceIdOrFullName)) { return resourceIdOrFullName; } if (resourceType.equalsIgnoreCase(Paths.OFFERS_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.PARTITIONS_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.TOPOLOGY_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.RID_RANGE_PATH_SEGMENT)) { return resourceIdOrFullName; } ResourceId parsedRId = ResourceId.parse(resourceIdOrFullName); if (resourceType.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) { return parsedRId.getDatabaseId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.USERS_PATH_SEGMENT)) { return parsedRId.getUserId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.COLLECTIONS_PATH_SEGMENT)) { return parsedRId.getDocumentCollectionId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.DOCUMENTS_PATH_SEGMENT)) { return parsedRId.getDocumentId().toString(); } else { return resourceIdOrFullName; } } private void getResourceTypeAndIdOrFullName(URI uri, PathInfo pathInfo) { if (uri == null) { throw new IllegalArgumentException("uri"); } pathInfo.resourcePath = StringUtils.EMPTY; pathInfo.resourceIdOrFullName = StringUtils.EMPTY; String[] segments = StringUtils.split(uri.toString(), Constants.Properties.PATH_SEPARATOR); if (segments == null || segments.length < 1) { throw new IllegalArgumentException(RMResources.InvalidUrl); } String pathAndQuery = StringUtils.EMPTY ; if(StringUtils.isNotEmpty(uri.getPath())) { pathAndQuery+= uri.getPath(); } if(StringUtils.isNotEmpty(uri.getQuery())) { pathAndQuery+="?"; pathAndQuery+= uri.getQuery(); } if (!PathsHelper.tryParsePathSegments(pathAndQuery, pathInfo, null)) { pathInfo.resourcePath = StringUtils.EMPTY; 
pathInfo.resourceIdOrFullName = StringUtils.EMPTY; } } }
class BaseAuthorizationTokenProvider implements AuthorizationTokenProvider { private static final String AUTH_PREFIX = "type=master&ver=1.0&sig="; private final CosmosKeyCredential cosmosKeyCredential; private final Mac macInstance; private int masterKeyHashCode; public BaseAuthorizationTokenProvider(CosmosKeyCredential cosmosKeyCredential) { this.cosmosKeyCredential = cosmosKeyCredential; this.macInstance = getMacInstance(); } private static String getResourceSegment(ResourceType resourceType) { switch (resourceType) { case Attachment: return Paths.ATTACHMENTS_PATH_SEGMENT; case Database: return Paths.DATABASES_PATH_SEGMENT; case Conflict: return Paths.CONFLICTS_PATH_SEGMENT; case Document: return Paths.DOCUMENTS_PATH_SEGMENT; case DocumentCollection: return Paths.COLLECTIONS_PATH_SEGMENT; case Offer: return Paths.OFFERS_PATH_SEGMENT; case Permission: return Paths.PERMISSIONS_PATH_SEGMENT; case StoredProcedure: return Paths.STORED_PROCEDURES_PATH_SEGMENT; case Trigger: return Paths.TRIGGERS_PATH_SEGMENT; case UserDefinedFunction: return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; case User: return Paths.USERS_PATH_SEGMENT; case PartitionKeyRange: return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; case Media: return Paths.MEDIA_PATH_SEGMENT; case DatabaseAccount: return ""; default: return null; } } /** * This API is a helper method to create auth header based on client request using masterkey. * * @param verb the verb. * @param resourceIdOrFullName the resource id or full name * @param resourceType the resource type. * @param headers the request headers. * @return the key authorization signature. 
*/ public String generateKeyAuthorizationSignature(String verb, String resourceIdOrFullName, ResourceType resourceType, Map<String, String> headers) { return this.generateKeyAuthorizationSignature(verb, resourceIdOrFullName, BaseAuthorizationTokenProvider.getResourceSegment(resourceType).toLowerCase(), headers); } /** * This API is a helper method to create auth header based on client request using masterkey. * * @param verb the verb * @param resourceIdOrFullName the resource id or full name * @param resourceSegment the resource segment * @param headers the request headers * @return the key authorization signature */ public String generateKeyAuthorizationSignature(String verb, String resourceIdOrFullName, String resourceSegment, Map<String, String> headers) { if (verb == null || verb.isEmpty()) { throw new IllegalArgumentException("verb"); } if (resourceIdOrFullName == null) { resourceIdOrFullName = ""; } if (resourceSegment == null) { throw new IllegalArgumentException("resourceSegment"); } if (headers == null) { throw new IllegalArgumentException("headers"); } if (StringUtils.isEmpty(this.cosmosKeyCredential.key())) { throw new IllegalArgumentException("key credentials cannot be empty"); } if(!PathsHelper.isNameBased(resourceIdOrFullName)) { resourceIdOrFullName = resourceIdOrFullName.toLowerCase(Locale.ROOT); } StringBuilder body = new StringBuilder(); body.append(verb.toLowerCase()) .append('\n') .append(resourceSegment) .append('\n') .append(resourceIdOrFullName) .append('\n'); if (headers.containsKey(HttpConstants.HttpHeaders.X_DATE)) { body.append(headers.get(HttpConstants.HttpHeaders.X_DATE).toLowerCase()); } body.append('\n'); if (headers.containsKey(HttpConstants.HttpHeaders.HTTP_DATE)) { body.append(headers.get(HttpConstants.HttpHeaders.HTTP_DATE).toLowerCase()); } body.append('\n'); Mac mac = getMacInstance(); byte[] digest = mac.doFinal(body.toString().getBytes()); String auth = Utils.encodeBase64String(digest); return AUTH_PREFIX + auth; } /** * This 
API is a helper method to create auth header based on client request using resourceTokens. * * @param resourceTokens the resource tokens. * @param path the path. * @param resourceId the resource id. * @return the authorization token. */ public String getAuthorizationTokenUsingResourceTokens(Map<String, String> resourceTokens, String path, String resourceId) { if (resourceTokens == null) { throw new IllegalArgumentException("resourceTokens"); } String resourceToken = null; if (resourceTokens.containsKey(resourceId) && resourceTokens.get(resourceId) != null) { resourceToken = resourceTokens.get(resourceId); } else if (StringUtils.isEmpty(path) || StringUtils.isEmpty(resourceId)) { if (resourceTokens.size() > 0) { resourceToken = resourceTokens.values().iterator().next(); } } else { String[] pathParts = StringUtils.split(path, "/"); String[] resourceTypes = {"dbs", "colls", "docs", "sprocs", "udfs", "triggers", "users", "permissions", "attachments", "media", "conflicts"}; HashSet<String> resourceTypesSet = new HashSet<String>(); Collections.addAll(resourceTypesSet, resourceTypes); for (int i = pathParts.length - 1; i >= 0; --i) { if (!resourceTypesSet.contains(pathParts[i]) && resourceTokens.containsKey(pathParts[i])) { resourceToken = resourceTokens.get(pathParts[i]); } } } return resourceToken; } public String generateKeyAuthorizationSignature(String verb, URI uri, Map<String, String> headers) { if (StringUtils.isEmpty(verb)) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "verb")); } if (uri == null) { throw new IllegalArgumentException("uri"); } if (headers == null) { throw new IllegalArgumentException("headers"); } PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); getResourceTypeAndIdOrFullName(uri, pathInfo); return generateKeyAuthorizationSignatureNew(verb, pathInfo.resourceIdOrFullName, pathInfo.resourcePath, headers); } private String generateKeyAuthorizationSignatureNew(String 
verb, String resourceIdValue, String resourceType, Map<String, String> headers) { if (StringUtils.isEmpty(verb)) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "verb")); } if (resourceType == null) { throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "resourceType")); } if (headers == null) { throw new IllegalArgumentException("headers"); } String authResourceId = getAuthorizationResourceIdOrFullName(resourceType, resourceIdValue); String payLoad = generateMessagePayload(verb, authResourceId, resourceType, headers); Mac mac = this.getMacInstance(); byte[] digest = mac.doFinal(payLoad.getBytes()); String authorizationToken = Utils.encodeBase64String(digest); String authtoken = AUTH_PREFIX + authorizationToken; return HttpUtils.urlEncode(authtoken); } private String generateMessagePayload(String verb, String resourceId, String resourceType, Map<String, String> headers) { String xDate = headers.get(HttpConstants.HttpHeaders.X_DATE); String date = headers.get(HttpConstants.HttpHeaders.HTTP_DATE); if (StringUtils.isEmpty(xDate) && (StringUtils.isEmpty(date) || StringUtils.isWhitespace(date))) { headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); xDate = Utils.nowAsRFC1123(); } if (!PathsHelper.isNameBased(resourceId)) { resourceId = resourceId.toLowerCase(); } StringBuilder payload = new StringBuilder(); payload.append(verb.toLowerCase()) .append('\n') .append(resourceType.toLowerCase()) .append('\n') .append(resourceId) .append('\n') .append(xDate.toLowerCase()) .append('\n') .append(StringUtils.isEmpty(xDate) ? 
date.toLowerCase() : "") .append('\n'); return payload.toString(); } private String getAuthorizationResourceIdOrFullName(String resourceType, String resourceIdOrFullName) { if (StringUtils.isEmpty(resourceType) || StringUtils.isEmpty(resourceIdOrFullName)) { return resourceIdOrFullName; } if (PathsHelper.isNameBased(resourceIdOrFullName)) { return resourceIdOrFullName; } if (resourceType.equalsIgnoreCase(Paths.OFFERS_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.PARTITIONS_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.TOPOLOGY_PATH_SEGMENT) || resourceType.equalsIgnoreCase(Paths.RID_RANGE_PATH_SEGMENT)) { return resourceIdOrFullName; } ResourceId parsedRId = ResourceId.parse(resourceIdOrFullName); if (resourceType.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) { return parsedRId.getDatabaseId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.USERS_PATH_SEGMENT)) { return parsedRId.getUserId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.COLLECTIONS_PATH_SEGMENT)) { return parsedRId.getDocumentCollectionId().toString(); } else if (resourceType.equalsIgnoreCase(Paths.DOCUMENTS_PATH_SEGMENT)) { return parsedRId.getDocumentId().toString(); } else { return resourceIdOrFullName; } } private void getResourceTypeAndIdOrFullName(URI uri, PathInfo pathInfo) { if (uri == null) { throw new IllegalArgumentException("uri"); } pathInfo.resourcePath = StringUtils.EMPTY; pathInfo.resourceIdOrFullName = StringUtils.EMPTY; String[] segments = StringUtils.split(uri.toString(), Constants.Properties.PATH_SEPARATOR); if (segments == null || segments.length < 1) { throw new IllegalArgumentException(RMResources.InvalidUrl); } String pathAndQuery = StringUtils.EMPTY ; if(StringUtils.isNotEmpty(uri.getPath())) { pathAndQuery+= uri.getPath(); } if(StringUtils.isNotEmpty(uri.getQuery())) { pathAndQuery+="?"; pathAndQuery+= uri.getQuery(); } if (!PathsHelper.tryParsePathSegments(pathAndQuery, pathInfo, null)) { pathInfo.resourcePath = StringUtils.EMPTY; 
pathInfo.resourceIdOrFullName = StringUtils.EMPTY; } } }
All other samples are making this extra call for many APIs. Not doing here would be odd.
public void listKeySnippets() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeys()) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } for (KeyBase key : keyClient.listKeys(new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } keyClient.listKeys().iterableByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.headers(), resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { Key keyWithMaterial = keyClient.getKey(value); System.out.printf("Received key with name %s and type %s %n", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); }); }); }
Key keyWithMaterial = keyClient.getKey(value);
public void listKeySnippets() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeys()) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } for (KeyBase key : keyClient.listKeys(new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } keyClient.listKeys().iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { Key keyWithMaterial = keyClient.getKey(value); System.out.printf("Received key with name %s and type %s %n", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); }); }); }
class KeyClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyClient} * @return An instance of {@link KeyClient} */ public KeyClient createClient() { KeyClient keyClient = new KeyClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); return keyClient; } /** * Generates a code sample for using {@link KeyClient */ public void createKey() { KeyClient keyClient = createClient(); Key key = keyClient.createKey("keyName", KeyType.EC); System.out.printf("Key is created with name %s and id %s %n", key.name(), key.id()); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKey(keyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKey(rsaKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKey(ecKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", 
deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeletedKeySnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void createKeyWithResponses() { KeyClient keyClient = createClient(); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKeyWithResponse(keyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKeyWithResponse(rsaKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKeyWithResponse(ecKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void getKeyWithResponseSnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKeyWithResponse("keyName", keyVersion, new Context(key1, value1)).value(); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); for 
(KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKeyWithResponse(key, new Context(key1, value1)).value(); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void getKeySnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKey("keyName", keyVersion); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); Key keyWithVersionValue = keyClient.getKey("keyName"); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersionValue.name(), keyWithVersionValue.id()); for (KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void updateKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKeyWithResponse(key, new Context(key1, value1), KeyOperation.ENCRYPT, KeyOperation.DECRYPT).value(); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void updateKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKey(key, KeyOperation.ENCRYPT, KeyOperation.DECRYPT); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); Key updateKey = 
keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBaseValue = keyClient.updateKey(updateKey); Key updatedKeyValue = keyClient.getKey(updatedKeyBaseValue.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKeyValue.name(), updatedKeyValue.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key with recovery Id %s \n", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void purgeDeletedKeySnippets() { KeyClient keyClient = createClient(); VoidResponse purgeResponse = keyClient.purgeDeletedKey("deletedKeyName"); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgeResponse.statusCode()); VoidResponse purgedResponse = keyClient.purgeDeletedKey("deletedKeyName", new Context(key2, value2)); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgedResponse.statusCode()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key recoveredKey = keyClient.recoverDeletedKeyWithResponse("deletedKeyName", new Context(key2, value2)).value(); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeySnippets() { KeyClient keyClient = createClient(); Key 
recoveredKey = keyClient.recoverDeletedKey("deletedKeyName"); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKey("keyName"); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKeyWithResponse("keyName", new Context(key2, value2)).value(); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Key keyResponse = keyClient.restoreKey(keyBackupByteArray); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.name(), keyResponse.id()); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Response<Key> keyResponse = keyClient.restoreKeyWithResponse(keyBackupByteArray, new Context(key1, value1)); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id()); } /** * Generates a code sample for using {@link KeyClient */ /** * Generates a code sample for using {@link KeyClient */ public void listDeletedKeysSnippets() { KeyClient keyClient = createClient(); for (DeletedKey deletedKey : keyClient.listDeletedKeys()) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } for (DeletedKey deletedKey : keyClient.listDeletedKeys(new Context(key2, value2))) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } 
keyClient.listDeletedKeys().iterableByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.headers(), resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Deleted key's recovery Id %s %n", value.recoveryId()); }); }); } /** * Generates code sample for using {@link KeyClient */ public void listKeyVersions() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeyVersions("keyName")) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } for (KeyBase key : keyClient.listKeyVersions("keyName", new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } keyClient.listKeyVersions("keyName").iterableByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.headers(), resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Response value is %d %n", value); }); }); } /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
class KeyClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyClient} * @return An instance of {@link KeyClient} */ public KeyClient createClient() { KeyClient keyClient = new KeyClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); return keyClient; } /** * Generates a code sample for using {@link KeyClient */ public void createKey() { KeyClient keyClient = createClient(); Key key = keyClient.createKey("keyName", KeyType.EC); System.out.printf("Key is created with name %s and id %s %n", key.name(), key.id()); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKey(keyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKey(rsaKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKey(ecKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", 
deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeletedKeySnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void createKeyWithResponses() { KeyClient keyClient = createClient(); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKeyWithResponse(keyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKeyWithResponse(rsaKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKeyWithResponse(ecKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void getKeyWithResponseSnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKeyWithResponse("keyName", keyVersion, new Context(key1, value1)).value(); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); for 
(KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKeyWithResponse(key, new Context(key1, value1)).value(); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void getKeySnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKey("keyName", keyVersion); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); Key keyWithVersionValue = keyClient.getKey("keyName"); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersionValue.name(), keyWithVersionValue.id()); for (KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void updateKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKeyWithResponse(key, new Context(key1, value1), KeyOperation.ENCRYPT, KeyOperation.DECRYPT).value(); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void updateKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKey(key, KeyOperation.ENCRYPT, KeyOperation.DECRYPT); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); Key updateKey = 
keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBaseValue = keyClient.updateKey(updateKey); Key updatedKeyValue = keyClient.getKey(updatedKeyBaseValue.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKeyValue.name(), updatedKeyValue.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key with recovery Id %s \n", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void purgeDeletedKeySnippets() { KeyClient keyClient = createClient(); VoidResponse purgeResponse = keyClient.purgeDeletedKey("deletedKeyName"); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgeResponse.statusCode()); VoidResponse purgedResponse = keyClient.purgeDeletedKey("deletedKeyName", new Context(key2, value2)); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgedResponse.statusCode()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key recoveredKey = keyClient.recoverDeletedKeyWithResponse("deletedKeyName", new Context(key2, value2)).value(); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeySnippets() { KeyClient keyClient = createClient(); Key 
recoveredKey = keyClient.recoverDeletedKey("deletedKeyName"); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKey("keyName"); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKeyWithResponse("keyName", new Context(key2, value2)).value(); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Key keyResponse = keyClient.restoreKey(keyBackupByteArray); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.name(), keyResponse.id()); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Response<Key> keyResponse = keyClient.restoreKeyWithResponse(keyBackupByteArray, new Context(key1, value1)); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id()); } /** * Generates a code sample for using {@link KeyClient */ /** * Generates a code sample for using {@link KeyClient */ public void listDeletedKeysSnippets() { KeyClient keyClient = createClient(); for (DeletedKey deletedKey : keyClient.listDeletedKeys()) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } for (DeletedKey deletedKey : keyClient.listDeletedKeys(new Context(key2, value2))) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } 
keyClient.listDeletedKeys().iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Deleted key's recovery Id %s %n", value.recoveryId()); }); }); } /** * Generates code sample for using {@link KeyClient */ public void listKeyVersions() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeyVersions("keyName")) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } for (KeyBase key : keyClient.listKeyVersions("keyName", new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } keyClient.listKeyVersions("keyName").iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Key name: %s, Key version: %s %n", value.name(), value.version()); }); }); } /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
looks like we are not looking for the `@throws` statement any more. Is this comment out of date?
private void checkNamingPattern(DetailAST blockCommentToken) { if (!BlockCommentPosition.isOnMethod(blockCommentToken)) { return; } DetailNode javadocNode = null; try { javadocNode = DetailNodeTreeStringPrinter.parseJavadocAsDetailNode(blockCommentToken); } catch (IllegalArgumentException ex) { } if (javadocNode == null) { return; } for (DetailNode node : javadocNode.getChildren()) { if (node.getType() != JavadocTokenTypes.JAVADOC_INLINE_TAG) { continue; } DetailNode customNameNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.CUSTOM_NAME); if (customNameNode == null || !CODE_SNIPPET_ANNOTATION.equals(customNameNode.getText())) { return; } DetailNode descriptionNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.DESCRIPTION); if (descriptionNode == null) { log(node.getLineNumber(), MISSING_CODESNIPPET_TAG_MESSAGE); return; } String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText(); final DetailAST methodDefToken = methodDefStack.peek(); final String methodName = methodDefToken.findFirstToken(TokenTypes.IDENT).getText(); final String className = classNameStack.isEmpty() ? "" : classNameStack.peek(); final String parameters = constructParametersString(methodDefToken); String fullPath = packageName + "." + className + "." + methodName; if (parameters != null) { fullPath = fullPath + " } if (customDescription == null || customDescription.isEmpty() || !isNamingMatched(customDescription.toLowerCase(Locale.ROOT), fullPath.toLowerCase(Locale.ROOT), parameters)) { log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codeSnippet description " + "''%s'' does not match ''%s''. Case Insensitive.", customDescription, fullPath)); } } }
private void checkNamingPattern(DetailAST blockCommentToken) { if (!BlockCommentPosition.isOnMethod(blockCommentToken)) { return; } DetailNode javadocNode = null; try { javadocNode = DetailNodeTreeStringPrinter.parseJavadocAsDetailNode(blockCommentToken); } catch (IllegalArgumentException ex) { } if (javadocNode == null) { return; } for (DetailNode node : javadocNode.getChildren()) { if (node.getType() != JavadocTokenTypes.JAVADOC_INLINE_TAG) { continue; } DetailNode customNameNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.CUSTOM_NAME); if (customNameNode == null || !CODE_SNIPPET_ANNOTATION.equals(customNameNode.getText())) { return; } DetailNode descriptionNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.DESCRIPTION); if (descriptionNode == null) { log(node.getLineNumber(), MISSING_CODESNIPPET_TAG_MESSAGE); return; } String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText(); final String methodName = methodDefToken.findFirstToken(TokenTypes.IDENT).getText(); final String className = classNameStack.isEmpty() ? "" : classNameStack.peek(); final String parameters = constructParametersString(methodDefToken); String fullPath = packageName + "." + className + "." + methodName; final String fullPathWithoutParameters = fullPath; if (parameters != null) { fullPath = fullPath + " } if (customDescription == null || customDescription.isEmpty() || !isNamingMatched(customDescription.toLowerCase(Locale.ROOT), fullPathWithoutParameters.toLowerCase(Locale.ROOT), parameters)) { log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codesnippet description " + "''%s'' does not match ''%s''. Case Insensitive.", customDescription, fullPath)); } } }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private Deque<DetailAST> methodDefStack = new ArrayDeque<>(); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private DetailAST methodDefToken = null; @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
`ImplUtils.isNullOrEmpty` if possible
public CosmosClient build() { ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot build client without service endpoint"); ifThrowIllegalArgException( this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty()) && this.tokenResolver == null && this.cosmosKeyCredential == null, "cannot build client without any one of key, resource token, permissions, token resolver, and cosmos key credential"); ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.key()), "cannot build client without key credential"); return new CosmosClient(this); }
ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.key()),
public CosmosClient build() { ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot build client without service endpoint"); ifThrowIllegalArgException( this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty()) && this.tokenResolver == null && this.cosmosKeyCredential == null, "cannot build client without any one of key, resource token, permissions, token resolver, and cosmos key credential"); ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.key()), "cannot build client without key credential"); return new CosmosClient(this); }
class CosmosClientBuilder { private Configs configs = new Configs(); private String serviceEndpoint; private String keyOrResourceToken; private ConnectionPolicy connectionPolicy; private ConsistencyLevel desiredConsistencyLevel; private List<Permission> permissions; private TokenResolver tokenResolver; private CosmosKeyCredential cosmosKeyCredential; CosmosClientBuilder() { } /** * Gets the token resolver * @return the token resolver */ public TokenResolver tokenResolver() { return tokenResolver; } /** * Sets the token resolver * @param tokenResolver * @return current builder */ public CosmosClientBuilder tokenResolver(TokenResolver tokenResolver) { this.tokenResolver = tokenResolver; return this; } /** * Gets the Azure Cosmos DB endpoint the SDK will connect to * @return the endpoint */ public String endpoint() { return serviceEndpoint; } /** * Sets the Azure Cosmos DB endpoint the SDK will connect to * @param endpoint the service endpoint * @return current Builder */ public CosmosClientBuilder endpoint(String endpoint) { this.serviceEndpoint = endpoint; return this; } /** * Gets either a master or readonly key used to perform authentication * for accessing resource. * @return the key */ public String key() { return keyOrResourceToken; } /** * Sets either a master or readonly key used to perform authentication * for accessing resource. * * @param key master or readonly key * @return current Builder. */ public CosmosClientBuilder key(String key) { this.keyOrResourceToken = key; return this; } /** * Sets a resource token used to perform authentication * for accessing resource. * @return the resourceToken */ public String resourceToken() { return keyOrResourceToken; } /** * Sets a resource token used to perform authentication * for accessing resource. * * @param resourceToken resourceToken for authentication * @return current Builder. 
*/ public CosmosClientBuilder resourceToken(String resourceToken) { this.keyOrResourceToken = resourceToken; return this; } /** * Gets the permission list, which contains the * resource tokens needed to access resources. * @return the permission list */ public List<Permission> permissions() { return permissions; } /** * Sets the permission list, which contains the * resource tokens needed to access resources. * * @param permissions Permission list for authentication. * @return current Builder. */ public CosmosClientBuilder permissions(List<Permission> permissions) { this.permissions = permissions; return this; } /** * Gets the {@link ConsistencyLevel} to be used * @return the consistency level */ public ConsistencyLevel consistencyLevel() { return this.desiredConsistencyLevel; } /** * Sets the {@link ConsistencyLevel} to be used * @param desiredConsistencyLevel {@link ConsistencyLevel} * @return current Builder */ public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; return this; } /** * Gets the (@link ConnectionPolicy) to be used * @return the connection policy */ public ConnectionPolicy connectionPolicy() { return connectionPolicy; } /** * Sets the {@link ConnectionPolicy} to be used * @param connectionPolicy {@link ConnectionPolicy} * @return current Builder */ public CosmosClientBuilder connectionPolicy(ConnectionPolicy connectionPolicy) { this.connectionPolicy = connectionPolicy; return this; } /** * Gets the {@link CosmosKeyCredential} to be used * @return cosmosKeyCredential */ public CosmosKeyCredential cosmosKeyCredential() { return cosmosKeyCredential; } /** * Sets the {@link CosmosKeyCredential} to be used * @param cosmosKeyCredential {@link CosmosKeyCredential} * @return current builder */ public CosmosClientBuilder cosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) { this.cosmosKeyCredential = cosmosKeyCredential; return this; } /** * Builds a cosmos 
configuration object with the provided properties * @return CosmosClient */ Configs configs() { return configs; } /** * Configs * @param configs * @return current builder */ CosmosClientBuilder configs(Configs configs) { this.configs = configs; return this; } private void ifThrowIllegalArgException(boolean value, String error) { if (value) { throw new IllegalArgumentException(error); } } }
class CosmosClientBuilder { private Configs configs = new Configs(); private String serviceEndpoint; private String keyOrResourceToken; private ConnectionPolicy connectionPolicy; private ConsistencyLevel desiredConsistencyLevel; private List<Permission> permissions; private TokenResolver tokenResolver; private CosmosKeyCredential cosmosKeyCredential; CosmosClientBuilder() { } /** * Gets the token resolver * @return the token resolver */ public TokenResolver tokenResolver() { return tokenResolver; } /** * Sets the token resolver * @param tokenResolver * @return current builder */ public CosmosClientBuilder tokenResolver(TokenResolver tokenResolver) { this.tokenResolver = tokenResolver; return this; } /** * Gets the Azure Cosmos DB endpoint the SDK will connect to * @return the endpoint */ public String endpoint() { return serviceEndpoint; } /** * Sets the Azure Cosmos DB endpoint the SDK will connect to * @param endpoint the service endpoint * @return current Builder */ public CosmosClientBuilder endpoint(String endpoint) { this.serviceEndpoint = endpoint; return this; } /** * Gets either a master or readonly key used to perform authentication * for accessing resource. * @return the key */ public String key() { return keyOrResourceToken; } /** * Sets either a master or readonly key used to perform authentication * for accessing resource. * * @param key master or readonly key * @return current Builder. */ public CosmosClientBuilder key(String key) { this.keyOrResourceToken = key; return this; } /** * Sets a resource token used to perform authentication * for accessing resource. * @return the resourceToken */ public String resourceToken() { return keyOrResourceToken; } /** * Sets a resource token used to perform authentication * for accessing resource. * * @param resourceToken resourceToken for authentication * @return current Builder. 
*/ public CosmosClientBuilder resourceToken(String resourceToken) { this.keyOrResourceToken = resourceToken; return this; } /** * Gets the permission list, which contains the * resource tokens needed to access resources. * @return the permission list */ public List<Permission> permissions() { return permissions; } /** * Sets the permission list, which contains the * resource tokens needed to access resources. * * @param permissions Permission list for authentication. * @return current Builder. */ public CosmosClientBuilder permissions(List<Permission> permissions) { this.permissions = permissions; return this; } /** * Gets the {@link ConsistencyLevel} to be used * @return the consistency level */ public ConsistencyLevel consistencyLevel() { return this.desiredConsistencyLevel; } /** * Sets the {@link ConsistencyLevel} to be used * @param desiredConsistencyLevel {@link ConsistencyLevel} * @return current Builder */ public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; return this; } /** * Gets the (@link ConnectionPolicy) to be used * @return the connection policy */ public ConnectionPolicy connectionPolicy() { return connectionPolicy; } /** * Sets the {@link ConnectionPolicy} to be used * @param connectionPolicy {@link ConnectionPolicy} * @return current Builder */ public CosmosClientBuilder connectionPolicy(ConnectionPolicy connectionPolicy) { this.connectionPolicy = connectionPolicy; return this; } /** * Gets the {@link CosmosKeyCredential} to be used * @return cosmosKeyCredential */ public CosmosKeyCredential cosmosKeyCredential() { return cosmosKeyCredential; } /** * Sets the {@link CosmosKeyCredential} to be used * @param cosmosKeyCredential {@link CosmosKeyCredential} * @return current builder */ public CosmosClientBuilder cosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) { this.cosmosKeyCredential = cosmosKeyCredential; return this; } /** * Builds a cosmos 
configuration object with the provided properties * @return CosmosClient */ Configs configs() { return configs; } /** * Configs * @param configs * @return current builder */ CosmosClientBuilder configs(Configs configs) { this.configs = configs; return this; } private void ifThrowIllegalArgException(boolean value, String error) { if (value) { throw new IllegalArgumentException(error); } } }
logErrorAndThrow() will throw RuntimeError internally. A ClientLogger can't be static and the naming of ClientLogger variable has to be 'logger'. It applies to other changes as well.
public static X509Certificate publicKeyFromPem(byte[] pem) { Pattern pattern = Pattern.compile("(?s)-----BEGIN CERTIFICATE-----.*-----END CERTIFICATE-----"); Matcher matcher = pattern.matcher(new String(pem, StandardCharsets.UTF_8)); if (!matcher.find()) { throw LOGGER.logErrorAndThrow(new IllegalArgumentException("PEM certificate provided does not contain -----BEGIN CERTIFICATE-----END CERTIFICATE----- block")); } try { CertificateFactory factory = CertificateFactory.getInstance("X.509"); InputStream stream = new ByteArrayInputStream(matcher.group().getBytes(StandardCharsets.UTF_8)); return (X509Certificate) factory.generateCertificate(stream); } catch (CertificateException e) { throw LOGGER.logErrorAndThrow(new IllegalStateException(e)); } }
throw LOGGER.logErrorAndThrow(new IllegalArgumentException("PEM certificate provided does not contain -----BEGIN CERTIFICATE-----END CERTIFICATE----- block"));
public static X509Certificate publicKeyFromPem(byte[] pem) { Pattern pattern = Pattern.compile("(?s)-----BEGIN CERTIFICATE-----.*-----END CERTIFICATE-----"); Matcher matcher = pattern.matcher(new String(pem, StandardCharsets.UTF_8)); if (!matcher.find()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("PEM certificate provided does not contain -----BEGIN CERTIFICATE-----END CERTIFICATE----- block")); } try { CertificateFactory factory = CertificateFactory.getInstance("X.509"); InputStream stream = new ByteArrayInputStream(matcher.group().getBytes(StandardCharsets.UTF_8)); return (X509Certificate) factory.generateCertificate(stream); } catch (CertificateException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } }
class CertificateUtil { private static final ClientLogger LOGGER = new ClientLogger(CertificateUtil.class); /** * Extracts the PrivateKey from a PEM certificate. * @param pem the contents of a PEM certificate. * @return the PrivateKey */ public static PrivateKey privateKeyFromPem(byte[] pem) { Pattern pattern = Pattern.compile("(?s)-----BEGIN PRIVATE KEY-----.*-----END PRIVATE KEY-----"); Matcher matcher = pattern.matcher(new String(pem, StandardCharsets.UTF_8)); if (!matcher.find()) { throw LOGGER.logErrorAndThrow(new IllegalArgumentException("Certificate file provided is not a valid PEM file.")); } String base64 = matcher.group() .replace("-----BEGIN PRIVATE KEY-----", "") .replace("-----END PRIVATE KEY-----", "") .replace("\n", "") .replace("\r", ""); byte[] key = Base64Util.decode(base64.getBytes(StandardCharsets.UTF_8)); PKCS8EncodedKeySpec spec = new PKCS8EncodedKeySpec(key); try { KeyFactory kf = KeyFactory.getInstance("RSA"); return kf.generatePrivate(spec); } catch (NoSuchAlgorithmException | InvalidKeySpecException e) { throw LOGGER.logErrorAndThrow(new IllegalStateException(e)); } } /** * Extracts the X509Certificate certificate from a PEM certificate. * @param pem the contents of a PEM certificate. * @return the X509Certificate certificate */ private CertificateUtil() { } }
class CertificateUtil { private static final ClientLogger LOGGER = new ClientLogger(CertificateUtil.class); /** * Extracts the PrivateKey from a PEM certificate. * @param pem the contents of a PEM certificate. * @return the PrivateKey */ public static PrivateKey privateKeyFromPem(byte[] pem) { Pattern pattern = Pattern.compile("(?s)-----BEGIN PRIVATE KEY-----.*-----END PRIVATE KEY-----"); Matcher matcher = pattern.matcher(new String(pem, StandardCharsets.UTF_8)); if (!matcher.find()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Certificate file provided is not a valid PEM file.")); } String base64 = matcher.group() .replace("-----BEGIN PRIVATE KEY-----", "") .replace("-----END PRIVATE KEY-----", "") .replace("\n", "") .replace("\r", ""); byte[] key = Base64Util.decode(base64.getBytes(StandardCharsets.UTF_8)); PKCS8EncodedKeySpec spec = new PKCS8EncodedKeySpec(key); try { KeyFactory kf = KeyFactory.getInstance("RSA"); return kf.generatePrivate(spec); } catch (NoSuchAlgorithmException | InvalidKeySpecException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } } /** * Extracts the X509Certificate certificate from a PEM certificate. * @param pem the contents of a PEM certificate. * @return the X509Certificate certificate */ private CertificateUtil() { } }
Rather than saying "Response value" can say "Key name %s, Key Version %s" here.
public void listKeyVersions() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeyVersions("keyName")) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } for (KeyBase key : keyClient.listKeyVersions("keyName", new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } keyClient.listKeyVersions("keyName").iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Response value is %d %n", value); }); }); }
System.out.printf("Response value is %d %n", value);
public void listKeyVersions() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeyVersions("keyName")) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } for (KeyBase key : keyClient.listKeyVersions("keyName", new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } keyClient.listKeyVersions("keyName").iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Key name: %s, Key version: %s %n", value.name(), value.version()); }); }); }
class KeyClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyClient} * @return An instance of {@link KeyClient} */ public KeyClient createClient() { KeyClient keyClient = new KeyClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); return keyClient; } /** * Generates a code sample for using {@link KeyClient */ public void createKey() { KeyClient keyClient = createClient(); Key key = keyClient.createKey("keyName", KeyType.EC); System.out.printf("Key is created with name %s and id %s %n", key.name(), key.id()); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKey(keyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKey(rsaKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKey(ecKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", 
deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeletedKeySnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void createKeyWithResponses() { KeyClient keyClient = createClient(); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKeyWithResponse(keyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKeyWithResponse(rsaKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKeyWithResponse(ecKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void getKeyWithResponseSnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKeyWithResponse("keyName", keyVersion, new Context(key1, value1)).value(); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); for 
(KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKeyWithResponse(key, new Context(key1, value1)).value(); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void getKeySnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKey("keyName", keyVersion); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); Key keyWithVersionValue = keyClient.getKey("keyName"); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersionValue.name(), keyWithVersionValue.id()); for (KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void updateKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKeyWithResponse(key, new Context(key1, value1), KeyOperation.ENCRYPT, KeyOperation.DECRYPT).value(); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void updateKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKey(key, KeyOperation.ENCRYPT, KeyOperation.DECRYPT); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); Key updateKey = 
keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBaseValue = keyClient.updateKey(updateKey); Key updatedKeyValue = keyClient.getKey(updatedKeyBaseValue.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKeyValue.name(), updatedKeyValue.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key with recovery Id %s \n", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void purgeDeletedKeySnippets() { KeyClient keyClient = createClient(); VoidResponse purgeResponse = keyClient.purgeDeletedKey("deletedKeyName"); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgeResponse.statusCode()); VoidResponse purgedResponse = keyClient.purgeDeletedKey("deletedKeyName", new Context(key2, value2)); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgedResponse.statusCode()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key recoveredKey = keyClient.recoverDeletedKeyWithResponse("deletedKeyName", new Context(key2, value2)).value(); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeySnippets() { KeyClient keyClient = createClient(); Key 
recoveredKey = keyClient.recoverDeletedKey("deletedKeyName"); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKey("keyName"); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKeyWithResponse("keyName", new Context(key2, value2)).value(); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Key keyResponse = keyClient.restoreKey(keyBackupByteArray); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.name(), keyResponse.id()); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Response<Key> keyResponse = keyClient.restoreKeyWithResponse(keyBackupByteArray, new Context(key1, value1)); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id()); } /** * Generates a code sample for using {@link KeyClient */ public void listKeySnippets() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeys()) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } for (KeyBase key : keyClient.listKeys(new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), 
keyWithMaterial.keyMaterial().kty()); } keyClient.listKeys().iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { Key keyWithMaterial = keyClient.getKey(value); System.out.printf("Received key with name %s and type %s %n", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); }); }); } /** * Generates a code sample for using {@link KeyClient */ public void listDeletedKeysSnippets() { KeyClient keyClient = createClient(); for (DeletedKey deletedKey : keyClient.listDeletedKeys()) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } for (DeletedKey deletedKey : keyClient.listDeletedKeys(new Context(key2, value2))) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } keyClient.listDeletedKeys().iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Deleted key's recovery Id %s %n", value.recoveryId()); }); }); } /** * Generates code sample for using {@link KeyClient */ /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
class KeyClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyClient} * @return An instance of {@link KeyClient} */ public KeyClient createClient() { KeyClient keyClient = new KeyClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); return keyClient; } /** * Generates a code sample for using {@link KeyClient */ public void createKey() { KeyClient keyClient = createClient(); Key key = keyClient.createKey("keyName", KeyType.EC); System.out.printf("Key is created with name %s and id %s %n", key.name(), key.id()); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKey(keyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKey(rsaKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKey(ecKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", 
deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeletedKeySnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void createKeyWithResponses() { KeyClient keyClient = createClient(); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKeyWithResponse(keyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKeyWithResponse(rsaKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKeyWithResponse(ecKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void getKeyWithResponseSnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKeyWithResponse("keyName", keyVersion, new Context(key1, value1)).value(); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); for 
(KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKeyWithResponse(key, new Context(key1, value1)).value(); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void getKeySnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKey("keyName", keyVersion); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); Key keyWithVersionValue = keyClient.getKey("keyName"); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersionValue.name(), keyWithVersionValue.id()); for (KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void updateKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKeyWithResponse(key, new Context(key1, value1), KeyOperation.ENCRYPT, KeyOperation.DECRYPT).value(); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void updateKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKey(key, KeyOperation.ENCRYPT, KeyOperation.DECRYPT); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); Key updateKey = 
keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBaseValue = keyClient.updateKey(updateKey); Key updatedKeyValue = keyClient.getKey(updatedKeyBaseValue.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKeyValue.name(), updatedKeyValue.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key with recovery Id %s \n", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void purgeDeletedKeySnippets() { KeyClient keyClient = createClient(); VoidResponse purgeResponse = keyClient.purgeDeletedKey("deletedKeyName"); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgeResponse.statusCode()); VoidResponse purgedResponse = keyClient.purgeDeletedKey("deletedKeyName", new Context(key2, value2)); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgedResponse.statusCode()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key recoveredKey = keyClient.recoverDeletedKeyWithResponse("deletedKeyName", new Context(key2, value2)).value(); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeySnippets() { KeyClient keyClient = createClient(); Key 
recoveredKey = keyClient.recoverDeletedKey("deletedKeyName"); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKey("keyName"); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKeyWithResponse("keyName", new Context(key2, value2)).value(); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Key keyResponse = keyClient.restoreKey(keyBackupByteArray); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.name(), keyResponse.id()); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Response<Key> keyResponse = keyClient.restoreKeyWithResponse(keyBackupByteArray, new Context(key1, value1)); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id()); } /** * Generates a code sample for using {@link KeyClient */ public void listKeySnippets() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeys()) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } for (KeyBase key : keyClient.listKeys(new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), 
keyWithMaterial.keyMaterial().kty()); } keyClient.listKeys().iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { Key keyWithMaterial = keyClient.getKey(value); System.out.printf("Received key with name %s and type %s %n", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); }); }); } /** * Generates a code sample for using {@link KeyClient */ public void listDeletedKeysSnippets() { KeyClient keyClient = createClient(); for (DeletedKey deletedKey : keyClient.listDeletedKeys()) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } for (DeletedKey deletedKey : keyClient.listDeletedKeys(new Context(key2, value2))) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } keyClient.listDeletedKeys().iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Deleted key's recovery Id %s %n", value.recoveryId()); }); }); } /** * Generates code sample for using {@link KeyClient */ /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
I've changed it to return the RuntimeException to be thrown as part of this change, having it throw inside the method and needing to return null afterwards in a lot of cases isn't a good pattern and may result in strange code to be used in cases where final variables need to be set.
public static X509Certificate publicKeyFromPem(byte[] pem) { Pattern pattern = Pattern.compile("(?s)-----BEGIN CERTIFICATE-----.*-----END CERTIFICATE-----"); Matcher matcher = pattern.matcher(new String(pem, StandardCharsets.UTF_8)); if (!matcher.find()) { throw LOGGER.logErrorAndThrow(new IllegalArgumentException("PEM certificate provided does not contain -----BEGIN CERTIFICATE-----END CERTIFICATE----- block")); } try { CertificateFactory factory = CertificateFactory.getInstance("X.509"); InputStream stream = new ByteArrayInputStream(matcher.group().getBytes(StandardCharsets.UTF_8)); return (X509Certificate) factory.generateCertificate(stream); } catch (CertificateException e) { throw LOGGER.logErrorAndThrow(new IllegalStateException(e)); } }
throw LOGGER.logErrorAndThrow(new IllegalArgumentException("PEM certificate provided does not contain -----BEGIN CERTIFICATE-----END CERTIFICATE----- block"));
public static X509Certificate publicKeyFromPem(byte[] pem) { Pattern pattern = Pattern.compile("(?s)-----BEGIN CERTIFICATE-----.*-----END CERTIFICATE-----"); Matcher matcher = pattern.matcher(new String(pem, StandardCharsets.UTF_8)); if (!matcher.find()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("PEM certificate provided does not contain -----BEGIN CERTIFICATE-----END CERTIFICATE----- block")); } try { CertificateFactory factory = CertificateFactory.getInstance("X.509"); InputStream stream = new ByteArrayInputStream(matcher.group().getBytes(StandardCharsets.UTF_8)); return (X509Certificate) factory.generateCertificate(stream); } catch (CertificateException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } }
class CertificateUtil { private static final ClientLogger LOGGER = new ClientLogger(CertificateUtil.class); /** * Extracts the PrivateKey from a PEM certificate. * @param pem the contents of a PEM certificate. * @return the PrivateKey */ public static PrivateKey privateKeyFromPem(byte[] pem) { Pattern pattern = Pattern.compile("(?s)-----BEGIN PRIVATE KEY-----.*-----END PRIVATE KEY-----"); Matcher matcher = pattern.matcher(new String(pem, StandardCharsets.UTF_8)); if (!matcher.find()) { throw LOGGER.logErrorAndThrow(new IllegalArgumentException("Certificate file provided is not a valid PEM file.")); } String base64 = matcher.group() .replace("-----BEGIN PRIVATE KEY-----", "") .replace("-----END PRIVATE KEY-----", "") .replace("\n", "") .replace("\r", ""); byte[] key = Base64Util.decode(base64.getBytes(StandardCharsets.UTF_8)); PKCS8EncodedKeySpec spec = new PKCS8EncodedKeySpec(key); try { KeyFactory kf = KeyFactory.getInstance("RSA"); return kf.generatePrivate(spec); } catch (NoSuchAlgorithmException | InvalidKeySpecException e) { throw LOGGER.logErrorAndThrow(new IllegalStateException(e)); } } /** * Extracts the X509Certificate certificate from a PEM certificate. * @param pem the contents of a PEM certificate. * @return the X509Certificate certificate */ private CertificateUtil() { } }
class CertificateUtil { private static final ClientLogger LOGGER = new ClientLogger(CertificateUtil.class); /** * Extracts the PrivateKey from a PEM certificate. * @param pem the contents of a PEM certificate. * @return the PrivateKey */ public static PrivateKey privateKeyFromPem(byte[] pem) { Pattern pattern = Pattern.compile("(?s)-----BEGIN PRIVATE KEY-----.*-----END PRIVATE KEY-----"); Matcher matcher = pattern.matcher(new String(pem, StandardCharsets.UTF_8)); if (!matcher.find()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Certificate file provided is not a valid PEM file.")); } String base64 = matcher.group() .replace("-----BEGIN PRIVATE KEY-----", "") .replace("-----END PRIVATE KEY-----", "") .replace("\n", "") .replace("\r", ""); byte[] key = Base64Util.decode(base64.getBytes(StandardCharsets.UTF_8)); PKCS8EncodedKeySpec spec = new PKCS8EncodedKeySpec(key); try { KeyFactory kf = KeyFactory.getInstance("RSA"); return kf.generatePrivate(spec); } catch (NoSuchAlgorithmException | InvalidKeySpecException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } } /** * Extracts the X509Certificate certificate from a PEM certificate. * @param pem the contents of a PEM certificate. * @return the X509Certificate certificate */ private CertificateUtil() { } }
This is out of scope of this PR but while you are modifying this file, could you also instead import the package and remove this fully-qualified class name?
public boolean tryAdd(final EventData eventData) { if (eventData == null) { throw logger.logWarningAndThrow(new IllegalArgumentException("eventData cannot be null")); } final int size; try { size = getSize(eventData, events.isEmpty()); } catch (java.nio.BufferOverflowException exception) { throw logger.logWarningAndThrow(new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } synchronized (lock) { if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; } this.events.add(eventData); return true; }
} catch (java.nio.BufferOverflowException exception) {
public boolean tryAdd(final EventData eventData) { if (eventData == null) { throw logger.logExceptionAsWarning(new IllegalArgumentException("eventData cannot be null")); } final int size; try { size = getSize(eventData, events.isEmpty()); } catch (BufferOverflowException exception) { throw logger.logExceptionAsWarning(new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } synchronized (lock) { if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; } this.events.add(eventData); return true; }
class EventDataBatch { private final ClientLogger logger = new ClientLogger(EventDataBatch.class); private final Object lock = new Object(); private final int maxMessageSize; private final String partitionKey; private final ErrorContextProvider contextProvider; private final List<EventData> events; private final byte[] eventBytes; private int sizeInBytes; EventDataBatch(int maxMessageSize, String partitionKey, ErrorContextProvider contextProvider) { this.maxMessageSize = maxMessageSize; this.partitionKey = partitionKey; this.contextProvider = contextProvider; this.events = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; } /** * Gets the number of {@link EventData events} in the batch. * * @return The number of {@link EventData events} in the batch. */ public int getSize() { return events.size(); } /** * Gets the size of the {@link EventDataBatch} in bytes. * * @return the size of the {@link EventDataBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link EventData eventData} to the batch. * * @param eventData The {@link EventData} to add to the batch. * @return {@code true} if the event could be added to the batch; {@code false} if the event was too large to fit in * the batch. * @throws IllegalArgumentException if {@code eventData} is {@code null}. * @throws AmqpException if {@code eventData} is larger than the maximum size of the {@link * EventDataBatch}. 
*/ List<EventData> getEvents() { return events; } String getPartitionKey() { return this.partitionKey; } private int getSize(final EventData eventData, final boolean isFirst) { Objects.requireNonNull(eventData); final Message amqpMessage = createAmqpMessage(eventData, partitionKey); int eventSize = amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } /* * Creates the AMQP message represented by the event data */ private Message createAmqpMessage(EventData event, String partitionKey) { final Message message = Proton.message(); if (event.properties() != null && !event.properties().isEmpty()) { final ApplicationProperties applicationProperties = new ApplicationProperties(event.properties()); message.setApplicationProperties(applicationProperties); } if (event.systemProperties() != null) { event.systemProperties().forEach((key, value) -> { if (EventData.RESERVED_SYSTEM_PROPERTIES.contains(key)) { return; } final MessageConstant constant = MessageConstant.fromString(key); if (constant != null) { switch (constant) { case MESSAGE_ID: message.setMessageId(value); break; case USER_ID: message.setUserId((byte[]) value); break; case TO: message.setAddress((String) value); break; case SUBJECT: message.setSubject((String) value); break; case REPLY_TO: message.setReplyTo((String) value); break; case CORRELATION_ID: message.setCorrelationId(value); break; case CONTENT_TYPE: message.setContentType((String) value); break; case CONTENT_ENCODING: message.setContentEncoding((String) value); break; case ABSOLUTE_EXPIRY_TIME: message.setExpiryTime((long) value); break; case CREATION_TIME: message.setCreationTime((long) value); break; case GROUP_ID: message.setGroupId((String) value); break; case GROUP_SEQUENCE: 
message.setGroupSequence((long) value); break; case REPLY_TO_GROUP_ID: message.setReplyToGroupId((String) value); break; default: throw logger.logWarningAndThrow(new IllegalArgumentException(String.format(Locale.US, "Property is not a recognized reserved property name: %s", key))); } } else { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(Symbol.getSymbol(key), value); message.setMessageAnnotations(messageAnnotations); } }); } if (partitionKey != null) { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } if (event.body() != null) { message.setBody(new Data(Binary.create(event.body()))); } return message; } }
class EventDataBatch { private final ClientLogger logger = new ClientLogger(EventDataBatch.class); private final Object lock = new Object(); private final int maxMessageSize; private final String partitionKey; private final ErrorContextProvider contextProvider; private final List<EventData> events; private final byte[] eventBytes; private int sizeInBytes; EventDataBatch(int maxMessageSize, String partitionKey, ErrorContextProvider contextProvider) { this.maxMessageSize = maxMessageSize; this.partitionKey = partitionKey; this.contextProvider = contextProvider; this.events = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; } /** * Gets the number of {@link EventData events} in the batch. * * @return The number of {@link EventData events} in the batch. */ public int getSize() { return events.size(); } /** * Gets the size of the {@link EventDataBatch} in bytes. * * @return the size of the {@link EventDataBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link EventData eventData} to the batch. * * @param eventData The {@link EventData} to add to the batch. * @return {@code true} if the event could be added to the batch; {@code false} if the event was too large to fit in * the batch. * @throws IllegalArgumentException if {@code eventData} is {@code null}. * @throws AmqpException if {@code eventData} is larger than the maximum size of the {@link * EventDataBatch}. 
*/ List<EventData> getEvents() { return events; } String getPartitionKey() { return this.partitionKey; } private int getSize(final EventData eventData, final boolean isFirst) { Objects.requireNonNull(eventData); final Message amqpMessage = createAmqpMessage(eventData, partitionKey); int eventSize = amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } /* * Creates the AMQP message represented by the event data */ private Message createAmqpMessage(EventData event, String partitionKey) { final Message message = Proton.message(); if (event.properties() != null && !event.properties().isEmpty()) { final ApplicationProperties applicationProperties = new ApplicationProperties(event.properties()); message.setApplicationProperties(applicationProperties); } if (event.systemProperties() != null) { event.systemProperties().forEach((key, value) -> { if (EventData.RESERVED_SYSTEM_PROPERTIES.contains(key)) { return; } final MessageConstant constant = MessageConstant.fromString(key); if (constant != null) { switch (constant) { case MESSAGE_ID: message.setMessageId(value); break; case USER_ID: message.setUserId((byte[]) value); break; case TO: message.setAddress((String) value); break; case SUBJECT: message.setSubject((String) value); break; case REPLY_TO: message.setReplyTo((String) value); break; case CORRELATION_ID: message.setCorrelationId(value); break; case CONTENT_TYPE: message.setContentType((String) value); break; case CONTENT_ENCODING: message.setContentEncoding((String) value); break; case ABSOLUTE_EXPIRY_TIME: message.setExpiryTime((long) value); break; case CREATION_TIME: message.setCreationTime((long) value); break; case GROUP_ID: message.setGroupId((String) value); break; case GROUP_SEQUENCE: 
message.setGroupSequence((long) value); break; case REPLY_TO_GROUP_ID: message.setReplyToGroupId((String) value); break; default: throw logger.logExceptionAsWarning(new IllegalArgumentException(String.format(Locale.US, "Property is not a recognized reserved property name: %s", key))); } } else { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(Symbol.getSymbol(key), value); message.setMessageAnnotations(messageAnnotations); } }); } if (partitionKey != null) { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } if (event.body() != null) { message.setBody(new Data(Binary.create(event.body()))); } return message; } }
Is this going to `toString` correctly for the headers? If you want to do associations for URL and status code show it like this: `URL: %s, Status code: %d`
public void listKeySnippets() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeys()) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } for (KeyBase key : keyClient.listKeys(new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } keyClient.listKeys().iterableByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.headers(), resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { Key keyWithMaterial = keyClient.getKey(value); System.out.printf("Received key with name %s and type %s %n", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); }); }); }
System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.headers(),
public void listKeySnippets() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeys()) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } for (KeyBase key : keyClient.listKeys(new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } keyClient.listKeys().iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { Key keyWithMaterial = keyClient.getKey(value); System.out.printf("Received key with name %s and type %s %n", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); }); }); }
class KeyClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyClient} * @return An instance of {@link KeyClient} */ public KeyClient createClient() { KeyClient keyClient = new KeyClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); return keyClient; } /** * Generates a code sample for using {@link KeyClient */ public void createKey() { KeyClient keyClient = createClient(); Key key = keyClient.createKey("keyName", KeyType.EC); System.out.printf("Key is created with name %s and id %s %n", key.name(), key.id()); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKey(keyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKey(rsaKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKey(ecKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", 
deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeletedKeySnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void createKeyWithResponses() { KeyClient keyClient = createClient(); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKeyWithResponse(keyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKeyWithResponse(rsaKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKeyWithResponse(ecKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void getKeyWithResponseSnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKeyWithResponse("keyName", keyVersion, new Context(key1, value1)).value(); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); for 
(KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKeyWithResponse(key, new Context(key1, value1)).value(); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void getKeySnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKey("keyName", keyVersion); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); Key keyWithVersionValue = keyClient.getKey("keyName"); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersionValue.name(), keyWithVersionValue.id()); for (KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void updateKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKeyWithResponse(key, new Context(key1, value1), KeyOperation.ENCRYPT, KeyOperation.DECRYPT).value(); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void updateKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKey(key, KeyOperation.ENCRYPT, KeyOperation.DECRYPT); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); Key updateKey = 
keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBaseValue = keyClient.updateKey(updateKey); Key updatedKeyValue = keyClient.getKey(updatedKeyBaseValue.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKeyValue.name(), updatedKeyValue.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key with recovery Id %s \n", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void purgeDeletedKeySnippets() { KeyClient keyClient = createClient(); VoidResponse purgeResponse = keyClient.purgeDeletedKey("deletedKeyName"); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgeResponse.statusCode()); VoidResponse purgedResponse = keyClient.purgeDeletedKey("deletedKeyName", new Context(key2, value2)); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgedResponse.statusCode()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key recoveredKey = keyClient.recoverDeletedKeyWithResponse("deletedKeyName", new Context(key2, value2)).value(); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeySnippets() { KeyClient keyClient = createClient(); Key 
recoveredKey = keyClient.recoverDeletedKey("deletedKeyName"); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKey("keyName"); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKeyWithResponse("keyName", new Context(key2, value2)).value(); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Key keyResponse = keyClient.restoreKey(keyBackupByteArray); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.name(), keyResponse.id()); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Response<Key> keyResponse = keyClient.restoreKeyWithResponse(keyBackupByteArray, new Context(key1, value1)); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id()); } /** * Generates a code sample for using {@link KeyClient */ /** * Generates a code sample for using {@link KeyClient */ public void listDeletedKeysSnippets() { KeyClient keyClient = createClient(); for (DeletedKey deletedKey : keyClient.listDeletedKeys()) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } for (DeletedKey deletedKey : keyClient.listDeletedKeys(new Context(key2, value2))) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } 
keyClient.listDeletedKeys().iterableByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.headers(), resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Deleted key's recovery Id %s %n", value.recoveryId()); }); }); } /** * Generates code sample for using {@link KeyClient */ public void listKeyVersions() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeyVersions("keyName")) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } for (KeyBase key : keyClient.listKeyVersions("keyName", new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } keyClient.listKeyVersions("keyName").iterableByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.headers(), resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Response value is %d %n", value); }); }); } /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
class KeyClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyClient} * @return An instance of {@link KeyClient} */ public KeyClient createClient() { KeyClient keyClient = new KeyClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); return keyClient; } /** * Generates a code sample for using {@link KeyClient */ public void createKey() { KeyClient keyClient = createClient(); Key key = keyClient.createKey("keyName", KeyType.EC); System.out.printf("Key is created with name %s and id %s %n", key.name(), key.id()); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKey(keyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKey(rsaKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKey(ecKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", 
deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeletedKeySnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void createKeyWithResponses() { KeyClient keyClient = createClient(); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKeyWithResponse(keyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKeyWithResponse(rsaKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKeyWithResponse(ecKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void getKeyWithResponseSnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKeyWithResponse("keyName", keyVersion, new Context(key1, value1)).value(); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); for 
(KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKeyWithResponse(key, new Context(key1, value1)).value(); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void getKeySnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKey("keyName", keyVersion); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); Key keyWithVersionValue = keyClient.getKey("keyName"); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersionValue.name(), keyWithVersionValue.id()); for (KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void updateKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKeyWithResponse(key, new Context(key1, value1), KeyOperation.ENCRYPT, KeyOperation.DECRYPT).value(); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void updateKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKey(key, KeyOperation.ENCRYPT, KeyOperation.DECRYPT); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); Key updateKey = 
keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBaseValue = keyClient.updateKey(updateKey); Key updatedKeyValue = keyClient.getKey(updatedKeyBaseValue.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKeyValue.name(), updatedKeyValue.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key with recovery Id %s \n", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void purgeDeletedKeySnippets() { KeyClient keyClient = createClient(); VoidResponse purgeResponse = keyClient.purgeDeletedKey("deletedKeyName"); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgeResponse.statusCode()); VoidResponse purgedResponse = keyClient.purgeDeletedKey("deletedKeyName", new Context(key2, value2)); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgedResponse.statusCode()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key recoveredKey = keyClient.recoverDeletedKeyWithResponse("deletedKeyName", new Context(key2, value2)).value(); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeySnippets() { KeyClient keyClient = createClient(); Key 
recoveredKey = keyClient.recoverDeletedKey("deletedKeyName"); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKey("keyName"); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKeyWithResponse("keyName", new Context(key2, value2)).value(); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Key keyResponse = keyClient.restoreKey(keyBackupByteArray); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.name(), keyResponse.id()); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Response<Key> keyResponse = keyClient.restoreKeyWithResponse(keyBackupByteArray, new Context(key1, value1)); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id()); } /** * Generates a code sample for using {@link KeyClient */ /** * Generates a code sample for using {@link KeyClient */ public void listDeletedKeysSnippets() { KeyClient keyClient = createClient(); for (DeletedKey deletedKey : keyClient.listDeletedKeys()) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } for (DeletedKey deletedKey : keyClient.listDeletedKeys(new Context(key2, value2))) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } 
keyClient.listDeletedKeys().iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Deleted key's recovery Id %s %n", value.recoveryId()); }); }); } /** * Generates code sample for using {@link KeyClient */ public void listKeyVersions() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeyVersions("keyName")) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } for (KeyBase key : keyClient.listKeyVersions("keyName", new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } keyClient.listKeyVersions("keyName").iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Key name: %s, Key version: %s %n", value.name(), value.version()); }); }); } /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
Do we want to make a call to get each key again? Why not just print the name?
public void listKeySnippets() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeys()) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } for (KeyBase key : keyClient.listKeys(new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } keyClient.listKeys().iterableByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.headers(), resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { Key keyWithMaterial = keyClient.getKey(value); System.out.printf("Received key with name %s and type %s %n", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); }); }); }
Key keyWithMaterial = keyClient.getKey(value);
public void listKeySnippets() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeys()) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } for (KeyBase key : keyClient.listKeys(new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } keyClient.listKeys().iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { Key keyWithMaterial = keyClient.getKey(value); System.out.printf("Received key with name %s and type %s %n", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); }); }); }
class KeyClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyClient} * @return An instance of {@link KeyClient} */ public KeyClient createClient() { KeyClient keyClient = new KeyClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); return keyClient; } /** * Generates a code sample for using {@link KeyClient */ public void createKey() { KeyClient keyClient = createClient(); Key key = keyClient.createKey("keyName", KeyType.EC); System.out.printf("Key is created with name %s and id %s %n", key.name(), key.id()); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKey(keyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKey(rsaKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKey(ecKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", 
deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeletedKeySnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void createKeyWithResponses() { KeyClient keyClient = createClient(); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKeyWithResponse(keyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKeyWithResponse(rsaKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKeyWithResponse(ecKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void getKeyWithResponseSnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKeyWithResponse("keyName", keyVersion, new Context(key1, value1)).value(); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); for 
(KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKeyWithResponse(key, new Context(key1, value1)).value(); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void getKeySnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKey("keyName", keyVersion); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); Key keyWithVersionValue = keyClient.getKey("keyName"); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersionValue.name(), keyWithVersionValue.id()); for (KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void updateKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKeyWithResponse(key, new Context(key1, value1), KeyOperation.ENCRYPT, KeyOperation.DECRYPT).value(); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void updateKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKey(key, KeyOperation.ENCRYPT, KeyOperation.DECRYPT); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); Key updateKey = 
keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBaseValue = keyClient.updateKey(updateKey); Key updatedKeyValue = keyClient.getKey(updatedKeyBaseValue.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKeyValue.name(), updatedKeyValue.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key with recovery Id %s \n", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void purgeDeletedKeySnippets() { KeyClient keyClient = createClient(); VoidResponse purgeResponse = keyClient.purgeDeletedKey("deletedKeyName"); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgeResponse.statusCode()); VoidResponse purgedResponse = keyClient.purgeDeletedKey("deletedKeyName", new Context(key2, value2)); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgedResponse.statusCode()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key recoveredKey = keyClient.recoverDeletedKeyWithResponse("deletedKeyName", new Context(key2, value2)).value(); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeySnippets() { KeyClient keyClient = createClient(); Key 
recoveredKey = keyClient.recoverDeletedKey("deletedKeyName"); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKey("keyName"); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKeyWithResponse("keyName", new Context(key2, value2)).value(); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Key keyResponse = keyClient.restoreKey(keyBackupByteArray); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.name(), keyResponse.id()); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Response<Key> keyResponse = keyClient.restoreKeyWithResponse(keyBackupByteArray, new Context(key1, value1)); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id()); } /** * Generates a code sample for using {@link KeyClient */ /** * Generates a code sample for using {@link KeyClient */ public void listDeletedKeysSnippets() { KeyClient keyClient = createClient(); for (DeletedKey deletedKey : keyClient.listDeletedKeys()) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } for (DeletedKey deletedKey : keyClient.listDeletedKeys(new Context(key2, value2))) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } 
keyClient.listDeletedKeys().iterableByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.headers(), resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Deleted key's recovery Id %s %n", value.recoveryId()); }); }); } /** * Generates code sample for using {@link KeyClient */ public void listKeyVersions() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeyVersions("keyName")) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } for (KeyBase key : keyClient.listKeyVersions("keyName", new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } keyClient.listKeyVersions("keyName").iterableByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.headers(), resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Response value is %d %n", value); }); }); } /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
class KeyClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyClient} * @return An instance of {@link KeyClient} */ public KeyClient createClient() { KeyClient keyClient = new KeyClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); return keyClient; } /** * Generates a code sample for using {@link KeyClient */ public void createKey() { KeyClient keyClient = createClient(); Key key = keyClient.createKey("keyName", KeyType.EC); System.out.printf("Key is created with name %s and id %s %n", key.name(), key.id()); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKey(keyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKey(rsaKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKey(ecKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", 
deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeletedKeySnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void createKeyWithResponses() { KeyClient keyClient = createClient(); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKeyWithResponse(keyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKeyWithResponse(rsaKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKeyWithResponse(ecKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void getKeyWithResponseSnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKeyWithResponse("keyName", keyVersion, new Context(key1, value1)).value(); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); for 
(KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKeyWithResponse(key, new Context(key1, value1)).value(); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void getKeySnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKey("keyName", keyVersion); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); Key keyWithVersionValue = keyClient.getKey("keyName"); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersionValue.name(), keyWithVersionValue.id()); for (KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void updateKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKeyWithResponse(key, new Context(key1, value1), KeyOperation.ENCRYPT, KeyOperation.DECRYPT).value(); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void updateKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKey(key, KeyOperation.ENCRYPT, KeyOperation.DECRYPT); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); Key updateKey = 
keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBaseValue = keyClient.updateKey(updateKey); Key updatedKeyValue = keyClient.getKey(updatedKeyBaseValue.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKeyValue.name(), updatedKeyValue.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key with recovery Id %s \n", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void purgeDeletedKeySnippets() { KeyClient keyClient = createClient(); VoidResponse purgeResponse = keyClient.purgeDeletedKey("deletedKeyName"); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgeResponse.statusCode()); VoidResponse purgedResponse = keyClient.purgeDeletedKey("deletedKeyName", new Context(key2, value2)); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgedResponse.statusCode()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key recoveredKey = keyClient.recoverDeletedKeyWithResponse("deletedKeyName", new Context(key2, value2)).value(); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeySnippets() { KeyClient keyClient = createClient(); Key 
recoveredKey = keyClient.recoverDeletedKey("deletedKeyName"); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKey("keyName"); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKeyWithResponse("keyName", new Context(key2, value2)).value(); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Key keyResponse = keyClient.restoreKey(keyBackupByteArray); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.name(), keyResponse.id()); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Response<Key> keyResponse = keyClient.restoreKeyWithResponse(keyBackupByteArray, new Context(key1, value1)); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id()); } /** * Generates a code sample for using {@link KeyClient */ /** * Generates a code sample for using {@link KeyClient */ public void listDeletedKeysSnippets() { KeyClient keyClient = createClient(); for (DeletedKey deletedKey : keyClient.listDeletedKeys()) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } for (DeletedKey deletedKey : keyClient.listDeletedKeys(new Context(key2, value2))) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } 
keyClient.listDeletedKeys().iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Deleted key's recovery Id %s %n", value.recoveryId()); }); }); } /** * Generates code sample for using {@link KeyClient */ public void listKeyVersions() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeyVersions("keyName")) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } for (KeyBase key : keyClient.listKeyVersions("keyName", new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } keyClient.listKeyVersions("keyName").iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Key name: %s, Key version: %s %n", value.name(), value.version()); }); }); } /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
Let's just use values stored in `SecretBase`
public void listSecretsCodeSnippets() { SecretClient secretClient = getSecretClient(); for (SecretBase secret : secretClient.listSecrets()) { Secret secretWithValue = secretClient.getSecret(secret); System.out.printf("Received secret with name %s and value %s", secretWithValue.name(), secretWithValue.value()); } for (SecretBase secret : secretClient.listSecrets(new Context(key1, value2))) { Secret secretWithValue = secretClient.getSecret(secret); System.out.printf("Received secret with name %s and value %s", secretWithValue.name(), secretWithValue.value()); } secretClient.listSecrets().iterableByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.headers(), resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { Secret secretWithValue = secretClient.getSecret(value); System.out.printf("Received secret with name %s and value %s", secretWithValue.name(), secretWithValue.value()); }); }); }
Secret secretWithValue = secretClient.getSecret(value);
public void listSecretsCodeSnippets() { SecretClient secretClient = getSecretClient(); for (SecretBase secret : secretClient.listSecrets()) { Secret secretWithValue = secretClient.getSecret(secret); System.out.printf("Received secret with name %s and value %s", secretWithValue.name(), secretWithValue.value()); } for (SecretBase secret : secretClient.listSecrets(new Context(key1, value2))) { Secret secretWithValue = secretClient.getSecret(secret); System.out.printf("Received secret with name %s and value %s", secretWithValue.name(), secretWithValue.value()); } secretClient.listSecrets().iterableByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.headers(), resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { Secret secretWithValue = secretClient.getSecret(value); System.out.printf("Received secret with name %s and value %s", secretWithValue.name(), secretWithValue.value()); }); }); }
class SecretClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Method to insert code snippets for {@link SecretClient */ public void getSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); for (SecretBase secret : secretClient.listSecrets()) { Secret secretWithValue = secretClient.getSecret(secret); System.out.printf("Secret is returned with name %s and value %s %n", secretWithValue.name(), secretWithValue.value()); } String secretVersion = "6A385B124DEF4096AF1361A85B16C204"; Secret secretWithVersion = secretClient.getSecret("secretName", secretVersion); System.out.printf("Secret is returned with name %s and value %s \n", secretWithVersion.name(), secretWithVersion.value()); Secret secretWithoutVersion = secretClient.getSecret("secretName", secretVersion); System.out.printf("Secret is returned with name %s and value %s \n", secretWithoutVersion.name(), secretWithoutVersion.value()); } /** * Method to insert code snippets for {@link SecretClient */ public void getSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); for (SecretBase secret : secretClient.listSecrets()) { Secret secretWithValue = secretClient.getSecretWithResponse(secret, new Context(key2, value2)).value(); System.out.printf("Secret is returned with name %s and value %s %n", secretWithValue.name(), secretWithValue.value()); } String secretVersion = "6A385B124DEF4096AF1361A85B16C204"; Secret secretWithVersion = secretClient.getSecretWithResponse("secretName", secretVersion, new Context(key2, value2)).value(); System.out.printf("Secret is returned with name %s and value %s \n", secretWithVersion.name(), secretWithVersion.value()); } /** * Method to insert code snippets for {@link SecretClient */ public void setSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); Secret newSecret = new Secret("secretName", 
"secretValue").expires(OffsetDateTime.now().plusDays(60)); Secret returnedSecret = secretClient.setSecret(newSecret); System.out.printf("Secret is created with name %s and value %s \n", returnedSecret.name(), returnedSecret.value()); Secret secret = secretClient.setSecret("secretName", "secretValue"); System.out.printf("Secret is created with name %s and value %s \n", secret.name(), secret.value()); } /** * Method to insert code snippets for {@link SecretClient */ public void setSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); Secret newSecret = new Secret("secretName", "secretValue").expires(OffsetDateTime.now().plusDays(60)); Secret secret = secretClient.setSecretWithResponse(newSecret, new Context(key1, value1)).value(); System.out.printf("Secret is created with name %s and value %s \n", secret.name(), secret.value()); } /** * Method to insert code snippets for {@link SecretClient */ public void updateSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); Secret secret = secretClient.getSecret("secretName"); secret.expires(OffsetDateTime.now().plusDays(60)); SecretBase updatedSecretBase = secretClient.updateSecret(secret); Secret updatedSecret = secretClient.getSecret(updatedSecretBase.name()); System.out.printf("Updated Secret is returned with name %s, value %s and expires %s \n", updatedSecret.name(), updatedSecret.value(), updatedSecret.expires()); } /** * Method to insert code snippets for {@link SecretClient */ public void updateSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); Secret secret = secretClient.getSecret("secretName"); secret.expires(OffsetDateTime.now().plusDays(60)); SecretBase updatedSecretBase = secretClient.updateSecretWithResponse(secret, new Context(key2, value2)).value(); Secret updatedSecret = secretClient.getSecret(updatedSecretBase.name()); System.out.printf("Updated Secret is returned with name %s, value %s and expires %s \n", updatedSecret.name(), 
updatedSecret.value(), updatedSecret.expires()); } /** * Method to insert code snippets for {@link SecretClient */ public void deleteSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); DeletedSecret deletedSecret = secretClient.deleteSecret("secretName"); System.out.printf("Deleted Secret's Recovery Id %s", deletedSecret.recoveryId()); } /** * Method to insert code snippets for {@link SecretClient */ public void deleteSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); DeletedSecret deletedSecret = secretClient.deleteSecretWithResponse("secretName", new Context(key2, value2)).value(); System.out.printf("Deleted Secret's Recovery Id %s", deletedSecret.recoveryId()); } /** * Method to insert code snippets for {@link SecretClient */ public void getDeletedSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); DeletedSecret deletedSecret = secretClient.getDeletedSecret("secretName"); System.out.printf("Deleted Secret's Recovery Id %s", deletedSecret.recoveryId()); } /** * Method to insert code snippets for {@link SecretClient */ public void getDeletedSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); DeletedSecret deletedSecret = secretClient.getDeletedSecretWithResponse("secretName", new Context(key2, value2)).value(); System.out.printf("Deleted Secret's Recovery Id %s", deletedSecret.recoveryId()); } /** * Method to insert code snippets for {@link SecretClient */ public void purgeDeletedSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); VoidResponse purgeResponse = secretClient.purgeDeletedSecret("secretName"); System.out.printf("Purge Status Code: %d", purgeResponse.statusCode()); VoidResponse purgedResponse = secretClient.purgeDeletedSecret("secretName", new Context(key2, value2)); System.out.printf("Purge Status Code: %d", purgedResponse.statusCode()); } /** * Method to insert code snippets for {@link SecretClient */ public void 
recoverDeletedSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); Secret recoveredSecret = secretClient.recoverDeletedSecret("secretName"); System.out.printf("Recovered Secret with name %s", recoveredSecret.name()); } /** * Method to insert code snippets for {@link SecretClient */ public void recoverDeletedSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); Secret recoveredSecret = secretClient.recoverDeletedSecretWithResponse("secretName", new Context(key1, value1)).value(); System.out.printf("Recovered Secret with name %s", recoveredSecret.name()); } /** * Method to insert code snippets for {@link SecretClient */ public void backupSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); byte[] secretBackup = secretClient.backupSecret("secretName"); System.out.printf("Secret's Backup Byte array's length %s", secretBackup.length); } /** * Method to insert code snippets for {@link SecretClient */ public void backupSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); byte[] secretBackup = secretClient.backupSecretWithResponse("secretName", new Context(key1, value1)).value(); System.out.printf("Secret's Backup Byte array's length %s", secretBackup.length); } /** * Method to insert code snippets for {@link SecretClient */ public void restoreSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); byte[] secretBackupByteArray = {}; Secret restoredSecret = secretClient.restoreSecret(secretBackupByteArray); System.out.printf("Restored Secret with name %s and value %s", restoredSecret.name(), restoredSecret.value()); } /** * Method to insert code snippets for {@link SecretClient */ public void restoreSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); byte[] secretBackupByteArray = {}; Secret restoredSecret = secretClient.restoreSecretWithResponse(secretBackupByteArray, new Context(key2, value2)).value(); System.out.printf("Restored Secret 
with name %s and value %s", restoredSecret.name(), restoredSecret.value()); } /** * Method to insert code snippets for {@link SecretClient */ /** * Method to insert code snippets for {@link SecretClient */ public void listDeletedSecretsCodeSnippets() { SecretClient secretClient = getSecretClient(); for (DeletedSecret deletedSecret : secretClient.listDeletedSecrets()) { System.out.printf("Deleted secret's recovery Id %s", deletedSecret.recoveryId()); } for (DeletedSecret deletedSecret : secretClient.listDeletedSecrets(new Context(key1, value2))) { System.out.printf("Deleted secret's recovery Id %s", deletedSecret.recoveryId()); } secretClient.listDeletedSecrets().iterableByPage().forEach(resp -> { System.out.printf("Response headers are %s. Url %s and status code %d %n", resp.headers(), resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Deleted secret's recovery Id %s", value.recoveryId()); }); }); } /** * Method to insert code snippets for {@link SecretClient */ public void listSecretVersionsCodeSnippets() { SecretClient secretClient = getSecretClient(); for (SecretBase secret : secretClient.listSecretVersions("secretName")) { Secret secretWithValue = secretClient.getSecret(secret); System.out.printf("Received secret's version with name %s and value %s", secretWithValue.name(), secretWithValue.value()); } for (SecretBase secret : secretClient.listSecretVersions("secretName", new Context(key1, value2))) { Secret secretWithValue = secretClient.getSecret(secret); System.out.printf("Received secret's version with name %s and value %s", secretWithValue.name(), secretWithValue.value()); } secretClient.listSecretVersions("secretName", new Context(key1, value2)).iterableByPage().forEach(resp -> { System.out.printf("Response headers are %s. 
Url %s and status code %d %n", resp.headers(), resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { Secret secretWithValue = secretClient.getSecret(value); System.out.printf("Received secret's version with name %s and value %s", secretWithValue.name(), secretWithValue.value()); }); }); } /** * Implementation for sync SecretClient * @return sync SecretClient */ private SecretClient getSyncSecretClientCodeSnippets() { SecretClient secretClient = new SecretClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("https: .httpLogDetailLevel(HttpLogDetailLevel.BODY_AND_HEADERS) .buildClient(); return secretClient; } /** * Implementation not provided for this method * @return {@code null} */ private SecretClient getSecretClient() { return null; } }
class SecretClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Method to insert code snippets for {@link SecretClient */ public void getSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); for (SecretBase secret : secretClient.listSecrets()) { Secret secretWithValue = secretClient.getSecret(secret); System.out.printf("Secret is returned with name %s and value %s %n", secretWithValue.name(), secretWithValue.value()); } String secretVersion = "6A385B124DEF4096AF1361A85B16C204"; Secret secretWithVersion = secretClient.getSecret("secretName", secretVersion); System.out.printf("Secret is returned with name %s and value %s \n", secretWithVersion.name(), secretWithVersion.value()); Secret secretWithoutVersion = secretClient.getSecret("secretName", secretVersion); System.out.printf("Secret is returned with name %s and value %s \n", secretWithoutVersion.name(), secretWithoutVersion.value()); } /** * Method to insert code snippets for {@link SecretClient */ public void getSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); for (SecretBase secret : secretClient.listSecrets()) { Secret secretWithValue = secretClient.getSecretWithResponse(secret, new Context(key2, value2)).value(); System.out.printf("Secret is returned with name %s and value %s %n", secretWithValue.name(), secretWithValue.value()); } String secretVersion = "6A385B124DEF4096AF1361A85B16C204"; Secret secretWithVersion = secretClient.getSecretWithResponse("secretName", secretVersion, new Context(key2, value2)).value(); System.out.printf("Secret is returned with name %s and value %s \n", secretWithVersion.name(), secretWithVersion.value()); } /** * Method to insert code snippets for {@link SecretClient */ public void setSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); Secret newSecret = new Secret("secretName", 
"secretValue").expires(OffsetDateTime.now().plusDays(60)); Secret returnedSecret = secretClient.setSecret(newSecret); System.out.printf("Secret is created with name %s and value %s \n", returnedSecret.name(), returnedSecret.value()); Secret secret = secretClient.setSecret("secretName", "secretValue"); System.out.printf("Secret is created with name %s and value %s \n", secret.name(), secret.value()); } /** * Method to insert code snippets for {@link SecretClient */ public void setSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); Secret newSecret = new Secret("secretName", "secretValue").expires(OffsetDateTime.now().plusDays(60)); Secret secret = secretClient.setSecretWithResponse(newSecret, new Context(key1, value1)).value(); System.out.printf("Secret is created with name %s and value %s \n", secret.name(), secret.value()); } /** * Method to insert code snippets for {@link SecretClient */ public void updateSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); Secret secret = secretClient.getSecret("secretName"); secret.expires(OffsetDateTime.now().plusDays(60)); SecretBase updatedSecretBase = secretClient.updateSecret(secret); Secret updatedSecret = secretClient.getSecret(updatedSecretBase.name()); System.out.printf("Updated Secret is returned with name %s, value %s and expires %s \n", updatedSecret.name(), updatedSecret.value(), updatedSecret.expires()); } /** * Method to insert code snippets for {@link SecretClient */ public void updateSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); Secret secret = secretClient.getSecret("secretName"); secret.expires(OffsetDateTime.now().plusDays(60)); SecretBase updatedSecretBase = secretClient.updateSecretWithResponse(secret, new Context(key2, value2)).value(); Secret updatedSecret = secretClient.getSecret(updatedSecretBase.name()); System.out.printf("Updated Secret is returned with name %s, value %s and expires %s \n", updatedSecret.name(), 
updatedSecret.value(), updatedSecret.expires()); } /** * Method to insert code snippets for {@link SecretClient */ public void deleteSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); DeletedSecret deletedSecret = secretClient.deleteSecret("secretName"); System.out.printf("Deleted Secret's Recovery Id %s", deletedSecret.recoveryId()); } /** * Method to insert code snippets for {@link SecretClient */ public void deleteSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); DeletedSecret deletedSecret = secretClient.deleteSecretWithResponse("secretName", new Context(key2, value2)).value(); System.out.printf("Deleted Secret's Recovery Id %s", deletedSecret.recoveryId()); } /** * Method to insert code snippets for {@link SecretClient */ public void getDeletedSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); DeletedSecret deletedSecret = secretClient.getDeletedSecret("secretName"); System.out.printf("Deleted Secret's Recovery Id %s", deletedSecret.recoveryId()); } /** * Method to insert code snippets for {@link SecretClient */ public void getDeletedSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); DeletedSecret deletedSecret = secretClient.getDeletedSecretWithResponse("secretName", new Context(key2, value2)).value(); System.out.printf("Deleted Secret's Recovery Id %s", deletedSecret.recoveryId()); } /** * Method to insert code snippets for {@link SecretClient */ public void purgeDeletedSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); VoidResponse purgeResponse = secretClient.purgeDeletedSecret("secretName"); System.out.printf("Purge Status Code: %d", purgeResponse.statusCode()); VoidResponse purgedResponse = secretClient.purgeDeletedSecret("secretName", new Context(key2, value2)); System.out.printf("Purge Status Code: %d", purgedResponse.statusCode()); } /** * Method to insert code snippets for {@link SecretClient */ public void 
recoverDeletedSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); Secret recoveredSecret = secretClient.recoverDeletedSecret("secretName"); System.out.printf("Recovered Secret with name %s", recoveredSecret.name()); } /** * Method to insert code snippets for {@link SecretClient */ public void recoverDeletedSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); Secret recoveredSecret = secretClient.recoverDeletedSecretWithResponse("secretName", new Context(key1, value1)).value(); System.out.printf("Recovered Secret with name %s", recoveredSecret.name()); } /** * Method to insert code snippets for {@link SecretClient */ public void backupSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); byte[] secretBackup = secretClient.backupSecret("secretName"); System.out.printf("Secret's Backup Byte array's length %s", secretBackup.length); } /** * Method to insert code snippets for {@link SecretClient */ public void backupSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); byte[] secretBackup = secretClient.backupSecretWithResponse("secretName", new Context(key1, value1)).value(); System.out.printf("Secret's Backup Byte array's length %s", secretBackup.length); } /** * Method to insert code snippets for {@link SecretClient */ public void restoreSecretCodeSnippets() { SecretClient secretClient = getSecretClient(); byte[] secretBackupByteArray = {}; Secret restoredSecret = secretClient.restoreSecret(secretBackupByteArray); System.out.printf("Restored Secret with name %s and value %s", restoredSecret.name(), restoredSecret.value()); } /** * Method to insert code snippets for {@link SecretClient */ public void restoreSecretWithResponseCodeSnippets() { SecretClient secretClient = getSecretClient(); byte[] secretBackupByteArray = {}; Secret restoredSecret = secretClient.restoreSecretWithResponse(secretBackupByteArray, new Context(key2, value2)).value(); System.out.printf("Restored Secret 
with name %s and value %s", restoredSecret.name(), restoredSecret.value()); } /** * Method to insert code snippets for {@link SecretClient */ /** * Method to insert code snippets for {@link SecretClient */ public void listDeletedSecretsCodeSnippets() { SecretClient secretClient = getSecretClient(); for (DeletedSecret deletedSecret : secretClient.listDeletedSecrets()) { System.out.printf("Deleted secret's recovery Id %s", deletedSecret.recoveryId()); } for (DeletedSecret deletedSecret : secretClient.listDeletedSecrets(new Context(key1, value2))) { System.out.printf("Deleted secret's recovery Id %s", deletedSecret.recoveryId()); } secretClient.listDeletedSecrets().iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Deleted secret's recovery Id %s", value.recoveryId()); }); }); } /** * Method to insert code snippets for {@link SecretClient */ public void listSecretVersionsCodeSnippets() { SecretClient secretClient = getSecretClient(); for (SecretBase secret : secretClient.listSecretVersions("secretName")) { Secret secretWithValue = secretClient.getSecret(secret); System.out.printf("Received secret's version with name %s and value %s", secretWithValue.name(), secretWithValue.value()); } for (SecretBase secret : secretClient.listSecretVersions("secretName", new Context(key1, value2))) { Secret secretWithValue = secretClient.getSecret(secret); System.out.printf("Received secret's version with name %s and value %s", secretWithValue.name(), secretWithValue.value()); } secretClient.listSecretVersions("secretName", new Context(key1, value2)).iterableByPage().forEach(resp -> { System.out.printf("Got response headers . 
Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { Secret secretWithValue = secretClient.getSecret(value); System.out.printf("Received secret's version with name %s and value %s", secretWithValue.name(), secretWithValue.value()); }); }); } /** * Implementation for sync SecretClient * @return sync SecretClient */ private SecretClient getSyncSecretClientCodeSnippets() { SecretClient secretClient = new SecretClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("https: .httpLogDetailLevel(HttpLogDetailLevel.BODY_AND_HEADERS) .buildClient(); return secretClient; } /** * Implementation not provided for this method * @return {@code null} */ private SecretClient getSecretClient() { return null; } }
+1
public void listKeyVersions() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeyVersions("keyName")) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } for (KeyBase key : keyClient.listKeyVersions("keyName", new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } keyClient.listKeyVersions("keyName").iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Response value is %d %n", value); }); }); }
System.out.printf("Response value is %d %n", value);
public void listKeyVersions() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeyVersions("keyName")) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } for (KeyBase key : keyClient.listKeyVersions("keyName", new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key's version with name %s, type %s and version %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty(), keyWithMaterial.version()); } keyClient.listKeyVersions("keyName").iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Key name: %s, Key version: %s %n", value.name(), value.version()); }); }); }
class KeyClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyClient} * @return An instance of {@link KeyClient} */ public KeyClient createClient() { KeyClient keyClient = new KeyClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); return keyClient; } /** * Generates a code sample for using {@link KeyClient */ public void createKey() { KeyClient keyClient = createClient(); Key key = keyClient.createKey("keyName", KeyType.EC); System.out.printf("Key is created with name %s and id %s %n", key.name(), key.id()); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKey(keyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKey(rsaKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKey(ecKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", 
deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeletedKeySnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void createKeyWithResponses() { KeyClient keyClient = createClient(); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKeyWithResponse(keyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKeyWithResponse(rsaKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKeyWithResponse(ecKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void getKeyWithResponseSnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKeyWithResponse("keyName", keyVersion, new Context(key1, value1)).value(); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); for 
(KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKeyWithResponse(key, new Context(key1, value1)).value(); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void getKeySnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKey("keyName", keyVersion); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); Key keyWithVersionValue = keyClient.getKey("keyName"); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersionValue.name(), keyWithVersionValue.id()); for (KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void updateKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKeyWithResponse(key, new Context(key1, value1), KeyOperation.ENCRYPT, KeyOperation.DECRYPT).value(); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void updateKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKey(key, KeyOperation.ENCRYPT, KeyOperation.DECRYPT); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); Key updateKey = 
keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBaseValue = keyClient.updateKey(updateKey); Key updatedKeyValue = keyClient.getKey(updatedKeyBaseValue.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKeyValue.name(), updatedKeyValue.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key with recovery Id %s \n", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void purgeDeletedKeySnippets() { KeyClient keyClient = createClient(); VoidResponse purgeResponse = keyClient.purgeDeletedKey("deletedKeyName"); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgeResponse.statusCode()); VoidResponse purgedResponse = keyClient.purgeDeletedKey("deletedKeyName", new Context(key2, value2)); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgedResponse.statusCode()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key recoveredKey = keyClient.recoverDeletedKeyWithResponse("deletedKeyName", new Context(key2, value2)).value(); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeySnippets() { KeyClient keyClient = createClient(); Key 
recoveredKey = keyClient.recoverDeletedKey("deletedKeyName"); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKey("keyName"); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKeyWithResponse("keyName", new Context(key2, value2)).value(); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Key keyResponse = keyClient.restoreKey(keyBackupByteArray); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.name(), keyResponse.id()); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Response<Key> keyResponse = keyClient.restoreKeyWithResponse(keyBackupByteArray, new Context(key1, value1)); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id()); } /** * Generates a code sample for using {@link KeyClient */ public void listKeySnippets() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeys()) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } for (KeyBase key : keyClient.listKeys(new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), 
keyWithMaterial.keyMaterial().kty()); } keyClient.listKeys().iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { Key keyWithMaterial = keyClient.getKey(value); System.out.printf("Received key with name %s and type %s %n", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); }); }); } /** * Generates a code sample for using {@link KeyClient */ public void listDeletedKeysSnippets() { KeyClient keyClient = createClient(); for (DeletedKey deletedKey : keyClient.listDeletedKeys()) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } for (DeletedKey deletedKey : keyClient.listDeletedKeys(new Context(key2, value2))) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } keyClient.listDeletedKeys().iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Deleted key's recovery Id %s %n", value.recoveryId()); }); }); } /** * Generates code sample for using {@link KeyClient */ /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
class KeyClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link KeyClient} * @return An instance of {@link KeyClient} */ public KeyClient createClient() { KeyClient keyClient = new KeyClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); return keyClient; } /** * Generates a code sample for using {@link KeyClient */ public void createKey() { KeyClient keyClient = createClient(); Key key = keyClient.createKey("keyName", KeyType.EC); System.out.printf("Key is created with name %s and id %s %n", key.name(), key.id()); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKey(keyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKey(rsaKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKey(ecKeyCreateOptions); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", 
deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeletedKeySnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKey("keyName"); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void createKeyWithResponses() { KeyClient keyClient = createClient(); KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key optionsKey = keyClient.createKeyWithResponse(keyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", optionsKey.name(), optionsKey.id()); RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName") .keySize(2048) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key rsaKey = keyClient.createRsaKeyWithResponse(rsaKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", rsaKey.name(), rsaKey.id()); EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName") .curve(KeyCurveName.P_384) .notBefore(OffsetDateTime.now().plusDays(1)) .expires(OffsetDateTime.now().plusYears(1)); Key ecKey = keyClient.createEcKeyWithResponse(ecKeyCreateOptions, new Context(key1, value1)).value(); System.out.printf("Key is created with name %s and id %s \n", ecKey.name(), ecKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void getKeyWithResponseSnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKeyWithResponse("keyName", keyVersion, new Context(key1, value1)).value(); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); for 
(KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKeyWithResponse(key, new Context(key1, value1)).value(); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void getKeySnippets() { KeyClient keyClient = createClient(); String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; Key keyWithVersion = keyClient.getKey("keyName", keyVersion); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersion.name(), keyWithVersion.id()); Key keyWithVersionValue = keyClient.getKey("keyName"); System.out.printf("Key is returned with name %s and id %s \n", keyWithVersionValue.name(), keyWithVersionValue.id()); for (KeyBase key : keyClient.listKeys()) { Key keyResponse = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyResponse.name(), keyResponse.keyMaterial().kty()); } } /** * Generates a code sample for using {@link KeyClient */ public void updateKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKeyWithResponse(key, new Context(key1, value1), KeyOperation.ENCRYPT, KeyOperation.DECRYPT).value(); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); } /** * Generates a code sample for using {@link KeyClient */ public void updateKeySnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBase = keyClient.updateKey(key, KeyOperation.ENCRYPT, KeyOperation.DECRYPT); Key updatedKey = keyClient.getKey(updatedKeyBase.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKey.name(), updatedKey.id()); Key updateKey = 
keyClient.getKey("keyName"); key.expires(OffsetDateTime.now().plusDays(60)); KeyBase updatedKeyBaseValue = keyClient.updateKey(updateKey); Key updatedKeyValue = keyClient.getKey(updatedKeyBaseValue.name()); System.out.printf("Key is updated with name %s and id %s \n", updatedKeyValue.name(), updatedKeyValue.id()); } /** * Generates a code sample for using {@link KeyClient */ public void deleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key key = keyClient.getKey("keyName"); DeletedKey deletedKey = keyClient.deleteKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key's Recovery Id %s", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void getDeleteKeyWithResponseSnippets() { KeyClient keyClient = createClient(); DeletedKey deletedKey = keyClient.getDeletedKeyWithResponse("keyName", new Context(key1, value1)).value(); System.out.printf("Deleted Key with recovery Id %s \n", deletedKey.recoveryId()); } /** * Generates a code sample for using {@link KeyClient */ public void purgeDeletedKeySnippets() { KeyClient keyClient = createClient(); VoidResponse purgeResponse = keyClient.purgeDeletedKey("deletedKeyName"); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgeResponse.statusCode()); VoidResponse purgedResponse = keyClient.purgeDeletedKey("deletedKeyName", new Context(key2, value2)); System.out.printf("Purge Status Code: %rsaPrivateExponent", purgedResponse.statusCode()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeyWithResponseSnippets() { KeyClient keyClient = createClient(); Key recoveredKey = keyClient.recoverDeletedKeyWithResponse("deletedKeyName", new Context(key2, value2)).value(); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void recoverDeletedKeySnippets() { KeyClient keyClient = createClient(); Key 
recoveredKey = keyClient.recoverDeletedKey("deletedKeyName"); System.out.printf("Recovered key with name %s", recoveredKey.name()); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKey("keyName"); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void backupKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackup = keyClient.backupKeyWithResponse("keyName", new Context(key2, value2)).value(); System.out.printf("Key's Backup Byte array's length %s", keyBackup.length); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeySnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Key keyResponse = keyClient.restoreKey(keyBackupByteArray); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.name(), keyResponse.id()); } /** * Generates a code sample for using {@link KeyClient */ public void restoreKeyWithResponseSnippets() { KeyClient keyClient = createClient(); byte[] keyBackupByteArray = {}; Response<Key> keyResponse = keyClient.restoreKeyWithResponse(keyBackupByteArray, new Context(key1, value1)); System.out.printf("Restored Key with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id()); } /** * Generates a code sample for using {@link KeyClient */ public void listKeySnippets() { KeyClient keyClient = createClient(); for (KeyBase key : keyClient.listKeys()) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); } for (KeyBase key : keyClient.listKeys(new Context(key2, value2))) { Key keyWithMaterial = keyClient.getKey(key); System.out.printf("Received key with name %s and type %s", keyWithMaterial.name(), 
keyWithMaterial.keyMaterial().kty()); } keyClient.listKeys().iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { Key keyWithMaterial = keyClient.getKey(value); System.out.printf("Received key with name %s and type %s %n", keyWithMaterial.name(), keyWithMaterial.keyMaterial().kty()); }); }); } /** * Generates a code sample for using {@link KeyClient */ public void listDeletedKeysSnippets() { KeyClient keyClient = createClient(); for (DeletedKey deletedKey : keyClient.listDeletedKeys()) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } for (DeletedKey deletedKey : keyClient.listDeletedKeys(new Context(key2, value2))) { System.out.printf("Deleted key's recovery Id %s", deletedKey.recoveryId()); } keyClient.listDeletedKeys().iterableByPage().forEach(resp -> { System.out.printf("Got response headers . Url: %s, Status code: %d %n", resp.request().url(), resp.statusCode()); resp.items().forEach(value -> { System.out.printf("Deleted key's recovery Id %s %n", value.recoveryId()); }); }); } /** * Generates code sample for using {@link KeyClient */ /** * Implementation not provided for this method * @return {@code null} */ private TokenCredential getKeyVaultCredential() { return null; } }
Why do we need Stack for this? Would a simple String variable work? It make sense for classes with inner classes to be able to pop() and come back to parent class... But, can we find a new Method_DEF inside a Method_def?
public void leaveToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: if (!classNameStack.isEmpty()) { classNameStack.pop(); } break; case TokenTypes.METHOD_DEF: if (!methodDefStack.isEmpty()) { methodDefStack.pop(); } break; default: break; } }
methodDefStack.pop();
public void leaveToken(DetailAST token) { if (token.getType() == TokenTypes.CLASS_DEF && !classNameStack.isEmpty()) { classNameStack.pop(); } }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private Deque<DetailAST> methodDefStack = new ArrayDeque<>(); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private DetailAST methodDefToken = null; @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
Will this result in a NullReferenceException if you do not have text for your description node.. findFirstToken returns null? (ie. `{@codesnippet}`).
private void checkNamingPattern(DetailAST blockCommentToken) { if (!BlockCommentPosition.isOnMethod(blockCommentToken)) { return; } DetailNode javadocNode = null; try { javadocNode = DetailNodeTreeStringPrinter.parseJavadocAsDetailNode(blockCommentToken); } catch (IllegalArgumentException ex) { } if (javadocNode == null) { return; } for (DetailNode node : javadocNode.getChildren()) { if (node.getType() != JavadocTokenTypes.JAVADOC_INLINE_TAG) { continue; } DetailNode customNameNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.CUSTOM_NAME); if (customNameNode == null || !CODE_SNIPPET_ANNOTATION.equals(customNameNode.getText())) { return; } DetailNode descriptionNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.DESCRIPTION); if (descriptionNode == null) { log(node.getLineNumber(), MISSING_CODESNIPPET_TAG_MESSAGE); return; } String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText(); final DetailAST methodDefToken = methodDefStack.peek(); final String methodName = methodDefToken.findFirstToken(TokenTypes.IDENT).getText(); final String className = classNameStack.isEmpty() ? "" : classNameStack.peek(); final String parameters = constructParametersString(methodDefToken); String fullPath = packageName + "." + className + "." + methodName; final String fullPathWithoutParameters = fullPath; if (parameters != null) { fullPath = fullPath + " } if (customDescription == null || customDescription.isEmpty() || !isNamingMatched(customDescription.toLowerCase(Locale.ROOT), fullPathWithoutParameters.toLowerCase(Locale.ROOT), parameters)) { log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codeSnippet description " + "''%s'' does not match ''%s''. Case Insensitive.", customDescription, fullPath)); } } }
String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText();
private void checkNamingPattern(DetailAST blockCommentToken) { if (!BlockCommentPosition.isOnMethod(blockCommentToken)) { return; } DetailNode javadocNode = null; try { javadocNode = DetailNodeTreeStringPrinter.parseJavadocAsDetailNode(blockCommentToken); } catch (IllegalArgumentException ex) { } if (javadocNode == null) { return; } for (DetailNode node : javadocNode.getChildren()) { if (node.getType() != JavadocTokenTypes.JAVADOC_INLINE_TAG) { continue; } DetailNode customNameNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.CUSTOM_NAME); if (customNameNode == null || !CODE_SNIPPET_ANNOTATION.equals(customNameNode.getText())) { return; } DetailNode descriptionNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.DESCRIPTION); if (descriptionNode == null) { log(node.getLineNumber(), MISSING_CODESNIPPET_TAG_MESSAGE); return; } String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText(); final String methodName = methodDefToken.findFirstToken(TokenTypes.IDENT).getText(); final String className = classNameStack.isEmpty() ? "" : classNameStack.peek(); final String parameters = constructParametersString(methodDefToken); String fullPath = packageName + "." + className + "." + methodName; final String fullPathWithoutParameters = fullPath; if (parameters != null) { fullPath = fullPath + " } if (customDescription == null || customDescription.isEmpty() || !isNamingMatched(customDescription.toLowerCase(Locale.ROOT), fullPathWithoutParameters.toLowerCase(Locale.ROOT), parameters)) { log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codesnippet description " + "''%s'' does not match ''%s''. Case Insensitive.", customDescription, fullPath)); } } }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private Deque<DetailAST> methodDefStack = new ArrayDeque<>(); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private DetailAST methodDefToken = null; @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
```suggestion log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codesnippet description " ```
private void checkNamingPattern(DetailAST blockCommentToken) { if (!BlockCommentPosition.isOnMethod(blockCommentToken)) { return; } DetailNode javadocNode = null; try { javadocNode = DetailNodeTreeStringPrinter.parseJavadocAsDetailNode(blockCommentToken); } catch (IllegalArgumentException ex) { } if (javadocNode == null) { return; } for (DetailNode node : javadocNode.getChildren()) { if (node.getType() != JavadocTokenTypes.JAVADOC_INLINE_TAG) { continue; } DetailNode customNameNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.CUSTOM_NAME); if (customNameNode == null || !CODE_SNIPPET_ANNOTATION.equals(customNameNode.getText())) { return; } DetailNode descriptionNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.DESCRIPTION); if (descriptionNode == null) { log(node.getLineNumber(), MISSING_CODESNIPPET_TAG_MESSAGE); return; } String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText(); final DetailAST methodDefToken = methodDefStack.peek(); final String methodName = methodDefToken.findFirstToken(TokenTypes.IDENT).getText(); final String className = classNameStack.isEmpty() ? "" : classNameStack.peek(); final String parameters = constructParametersString(methodDefToken); String fullPath = packageName + "." + className + "." + methodName; final String fullPathWithoutParameters = fullPath; if (parameters != null) { fullPath = fullPath + " } if (customDescription == null || customDescription.isEmpty() || !isNamingMatched(customDescription.toLowerCase(Locale.ROOT), fullPathWithoutParameters.toLowerCase(Locale.ROOT), parameters)) { log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codeSnippet description " + "''%s'' does not match ''%s''. Case Insensitive.", customDescription, fullPath)); } } }
log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codeSnippet description "
private void checkNamingPattern(DetailAST blockCommentToken) { if (!BlockCommentPosition.isOnMethod(blockCommentToken)) { return; } DetailNode javadocNode = null; try { javadocNode = DetailNodeTreeStringPrinter.parseJavadocAsDetailNode(blockCommentToken); } catch (IllegalArgumentException ex) { } if (javadocNode == null) { return; } for (DetailNode node : javadocNode.getChildren()) { if (node.getType() != JavadocTokenTypes.JAVADOC_INLINE_TAG) { continue; } DetailNode customNameNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.CUSTOM_NAME); if (customNameNode == null || !CODE_SNIPPET_ANNOTATION.equals(customNameNode.getText())) { return; } DetailNode descriptionNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.DESCRIPTION); if (descriptionNode == null) { log(node.getLineNumber(), MISSING_CODESNIPPET_TAG_MESSAGE); return; } String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText(); final String methodName = methodDefToken.findFirstToken(TokenTypes.IDENT).getText(); final String className = classNameStack.isEmpty() ? "" : classNameStack.peek(); final String parameters = constructParametersString(methodDefToken); String fullPath = packageName + "." + className + "." + methodName; final String fullPathWithoutParameters = fullPath; if (parameters != null) { fullPath = fullPath + " } if (customDescription == null || customDescription.isEmpty() || !isNamingMatched(customDescription.toLowerCase(Locale.ROOT), fullPathWithoutParameters.toLowerCase(Locale.ROOT), parameters)) { log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codesnippet description " + "''%s'' does not match ''%s''. Case Insensitive.", customDescription, fullPath)); } } }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private Deque<DetailAST> methodDefStack = new ArrayDeque<>(); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private DetailAST methodDefToken = null; @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
```suggestion final StringBuilder sb = new StringBuilder(); ```
private String constructParametersString(DetailAST methodDefToken) { StringBuilder sb = new StringBuilder(); final DetailAST parametersToken = methodDefToken.findFirstToken(TokenTypes.PARAMETERS); for (DetailAST ast = parametersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.PARAMETER_DEF) { continue; } final DetailAST typeToken = ast.findFirstToken(TokenTypes.TYPE); final DetailAST identToken = typeToken.findFirstToken(TokenTypes.IDENT); String parameterType = ""; if (identToken != null) { parameterType = identToken.getText(); } else { DetailAST arrayDeclarator = typeToken.findFirstToken(TokenTypes.ARRAY_DECLARATOR); if (arrayDeclarator == null) { parameterType = typeToken.getFirstChild().getText(); } DetailAST arrayDeclaratorIterator = arrayDeclarator; while (arrayDeclaratorIterator != null) { DetailAST temp = arrayDeclaratorIterator.findFirstToken(TokenTypes.ARRAY_DECLARATOR); if (temp == null) { parameterType = arrayDeclaratorIterator.getFirstChild().getText(); break; } arrayDeclaratorIterator = temp; } } sb.append(parameterType).append("-"); } int size = sb.length(); if (size == 0) { return null; } return sb.substring(0, size - 1); }
StringBuilder sb = new StringBuilder();
private String constructParametersString(DetailAST methodDefToken) { final StringBuilder sb = new StringBuilder(); final DetailAST parametersToken = methodDefToken.findFirstToken(TokenTypes.PARAMETERS); for (DetailAST ast = parametersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.PARAMETER_DEF) { continue; } final DetailAST typeToken = ast.findFirstToken(TokenTypes.TYPE); final DetailAST identToken = typeToken.findFirstToken(TokenTypes.IDENT); String parameterType = ""; if (identToken != null) { parameterType = identToken.getText(); } else { DetailAST arrayDeclarator = typeToken.findFirstToken(TokenTypes.ARRAY_DECLARATOR); if (arrayDeclarator == null) { parameterType = typeToken.getFirstChild().getText(); } DetailAST arrayDeclaratorIterator = arrayDeclarator; while (arrayDeclaratorIterator != null) { DetailAST temp = arrayDeclaratorIterator.findFirstToken(TokenTypes.ARRAY_DECLARATOR); if (temp == null) { parameterType = arrayDeclaratorIterator.getFirstChild().getText(); break; } arrayDeclaratorIterator = temp; } } sb.append(parameterType).append("-"); } int size = sb.length(); if (size == 0) { return null; } return sb.substring(0, size - 1); }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private Deque<DetailAST> methodDefStack = new ArrayDeque<>(); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private DetailAST methodDefToken = null; @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
You are right, Looking for @codesnippet
private void checkNamingPattern(DetailAST blockCommentToken) { if (!BlockCommentPosition.isOnMethod(blockCommentToken)) { return; } DetailNode javadocNode = null; try { javadocNode = DetailNodeTreeStringPrinter.parseJavadocAsDetailNode(blockCommentToken); } catch (IllegalArgumentException ex) { } if (javadocNode == null) { return; } for (DetailNode node : javadocNode.getChildren()) { if (node.getType() != JavadocTokenTypes.JAVADOC_INLINE_TAG) { continue; } DetailNode customNameNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.CUSTOM_NAME); if (customNameNode == null || !CODE_SNIPPET_ANNOTATION.equals(customNameNode.getText())) { return; } DetailNode descriptionNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.DESCRIPTION); if (descriptionNode == null) { log(node.getLineNumber(), MISSING_CODESNIPPET_TAG_MESSAGE); return; } String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText(); final DetailAST methodDefToken = methodDefStack.peek(); final String methodName = methodDefToken.findFirstToken(TokenTypes.IDENT).getText(); final String className = classNameStack.isEmpty() ? "" : classNameStack.peek(); final String parameters = constructParametersString(methodDefToken); String fullPath = packageName + "." + className + "." + methodName; if (parameters != null) { fullPath = fullPath + " } if (customDescription == null || customDescription.isEmpty() || !isNamingMatched(customDescription.toLowerCase(Locale.ROOT), fullPath.toLowerCase(Locale.ROOT), parameters)) { log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codeSnippet description " + "''%s'' does not match ''%s''. Case Insensitive.", customDescription, fullPath)); } } }
private void checkNamingPattern(DetailAST blockCommentToken) { if (!BlockCommentPosition.isOnMethod(blockCommentToken)) { return; } DetailNode javadocNode = null; try { javadocNode = DetailNodeTreeStringPrinter.parseJavadocAsDetailNode(blockCommentToken); } catch (IllegalArgumentException ex) { } if (javadocNode == null) { return; } for (DetailNode node : javadocNode.getChildren()) { if (node.getType() != JavadocTokenTypes.JAVADOC_INLINE_TAG) { continue; } DetailNode customNameNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.CUSTOM_NAME); if (customNameNode == null || !CODE_SNIPPET_ANNOTATION.equals(customNameNode.getText())) { return; } DetailNode descriptionNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.DESCRIPTION); if (descriptionNode == null) { log(node.getLineNumber(), MISSING_CODESNIPPET_TAG_MESSAGE); return; } String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText(); final String methodName = methodDefToken.findFirstToken(TokenTypes.IDENT).getText(); final String className = classNameStack.isEmpty() ? "" : classNameStack.peek(); final String parameters = constructParametersString(methodDefToken); String fullPath = packageName + "." + className + "." + methodName; final String fullPathWithoutParameters = fullPath; if (parameters != null) { fullPath = fullPath + " } if (customDescription == null || customDescription.isEmpty() || !isNamingMatched(customDescription.toLowerCase(Locale.ROOT), fullPathWithoutParameters.toLowerCase(Locale.ROOT), parameters)) { log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codesnippet description " + "''%s'' does not match ''%s''. Case Insensitive.", customDescription, fullPath)); } } }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private Deque<DetailAST> methodDefStack = new ArrayDeque<>(); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private DetailAST methodDefToken = null; @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
Good point. A variable should be enough.
public void leaveToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: if (!classNameStack.isEmpty()) { classNameStack.pop(); } break; case TokenTypes.METHOD_DEF: if (!methodDefStack.isEmpty()) { methodDefStack.pop(); } break; default: break; } }
methodDefStack.pop();
public void leaveToken(DetailAST token) { if (token.getType() == TokenTypes.CLASS_DEF && !classNameStack.isEmpty()) { classNameStack.pop(); } }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private Deque<DetailAST> methodDefStack = new ArrayDeque<>(); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private DetailAST methodDefToken = null; @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
There will always have TEXT token if there is DESCRIPTION token exists. Will add this comment to the above of this code section
private void checkNamingPattern(DetailAST blockCommentToken) { if (!BlockCommentPosition.isOnMethod(blockCommentToken)) { return; } DetailNode javadocNode = null; try { javadocNode = DetailNodeTreeStringPrinter.parseJavadocAsDetailNode(blockCommentToken); } catch (IllegalArgumentException ex) { } if (javadocNode == null) { return; } for (DetailNode node : javadocNode.getChildren()) { if (node.getType() != JavadocTokenTypes.JAVADOC_INLINE_TAG) { continue; } DetailNode customNameNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.CUSTOM_NAME); if (customNameNode == null || !CODE_SNIPPET_ANNOTATION.equals(customNameNode.getText())) { return; } DetailNode descriptionNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.DESCRIPTION); if (descriptionNode == null) { log(node.getLineNumber(), MISSING_CODESNIPPET_TAG_MESSAGE); return; } String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText(); final DetailAST methodDefToken = methodDefStack.peek(); final String methodName = methodDefToken.findFirstToken(TokenTypes.IDENT).getText(); final String className = classNameStack.isEmpty() ? "" : classNameStack.peek(); final String parameters = constructParametersString(methodDefToken); String fullPath = packageName + "." + className + "." + methodName; final String fullPathWithoutParameters = fullPath; if (parameters != null) { fullPath = fullPath + " } if (customDescription == null || customDescription.isEmpty() || !isNamingMatched(customDescription.toLowerCase(Locale.ROOT), fullPathWithoutParameters.toLowerCase(Locale.ROOT), parameters)) { log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codeSnippet description " + "''%s'' does not match ''%s''. Case Insensitive.", customDescription, fullPath)); } } }
String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText();
private void checkNamingPattern(DetailAST blockCommentToken) { if (!BlockCommentPosition.isOnMethod(blockCommentToken)) { return; } DetailNode javadocNode = null; try { javadocNode = DetailNodeTreeStringPrinter.parseJavadocAsDetailNode(blockCommentToken); } catch (IllegalArgumentException ex) { } if (javadocNode == null) { return; } for (DetailNode node : javadocNode.getChildren()) { if (node.getType() != JavadocTokenTypes.JAVADOC_INLINE_TAG) { continue; } DetailNode customNameNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.CUSTOM_NAME); if (customNameNode == null || !CODE_SNIPPET_ANNOTATION.equals(customNameNode.getText())) { return; } DetailNode descriptionNode = JavadocUtil.findFirstToken(node, JavadocTokenTypes.DESCRIPTION); if (descriptionNode == null) { log(node.getLineNumber(), MISSING_CODESNIPPET_TAG_MESSAGE); return; } String customDescription = JavadocUtil.findFirstToken(descriptionNode, JavadocTokenTypes.TEXT).getText(); final String methodName = methodDefToken.findFirstToken(TokenTypes.IDENT).getText(); final String className = classNameStack.isEmpty() ? "" : classNameStack.peek(); final String parameters = constructParametersString(methodDefToken); String fullPath = packageName + "." + className + "." + methodName; final String fullPathWithoutParameters = fullPath; if (parameters != null) { fullPath = fullPath + " } if (customDescription == null || customDescription.isEmpty() || !isNamingMatched(customDescription.toLowerCase(Locale.ROOT), fullPathWithoutParameters.toLowerCase(Locale.ROOT), parameters)) { log(node.getLineNumber(), String.format("Naming pattern mismatch. The @codesnippet description " + "''%s'' does not match ''%s''. Case Insensitive.", customDescription, fullPath)); } } }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private Deque<DetailAST> methodDefStack = new ArrayDeque<>(); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class name when leave the same token private Deque<String> classNameStack = new ArrayDeque<>(); private DetailAST methodDefToken = null; @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
This was a false positive in lts right ? mvn spotbugs:spotbugs doesn't think of it as a spotbug in java 8.
private void checkDigestLength(byte[] digest) { if (digest.length != getDigestLength()) { throw new IllegalArgumentException("Invalid digest length."); } }
if (digest.length != getDigestLength()) {
private void checkDigestLength(byte[] digest) { if (digest.length != getDigestLength()) { throw new IllegalArgumentException("Invalid digest length."); } }
class EcdsaSignatureTransform implements ISignatureTransform { private static final String ALGORITHM = "NONEwithECDSA"; private final KeyPair keyPair; private final Provider provider; private final Ecdsa algorithm; EcdsaSignatureTransform(KeyPair keyPair, Provider provider, Ecdsa algorithm) { this.keyPair = keyPair; this.provider = provider; this.algorithm = algorithm; } @Override public byte[] sign(byte[] digest) throws GeneralSecurityException { checkDigestLength(digest); Signature signature = Signature.getInstance(ALGORITHM, provider); signature.initSign(keyPair.getPrivate()); signature.update(digest); return SignatureEncoding.fromAsn1Der(signature.sign(), algorithm); } @Override public boolean verify(byte[] digest, byte[] signature) throws GeneralSecurityException { Signature verify = Signature.getInstance(ALGORITHM, provider); checkDigestLength(digest); signature = SignatureEncoding.toAsn1Der(signature, algorithm); verify.initVerify(keyPair.getPublic()); verify.update(digest); return verify.verify(signature); } }
class EcdsaSignatureTransform implements ISignatureTransform { private static final String ALGORITHM = "NONEwithECDSA"; private final KeyPair keyPair; private final Provider provider; private final Ecdsa algorithm; EcdsaSignatureTransform(KeyPair keyPair, Provider provider, Ecdsa algorithm) { this.keyPair = keyPair; this.provider = provider; this.algorithm = algorithm; } @Override public byte[] sign(byte[] digest) throws GeneralSecurityException { checkDigestLength(digest); Signature signature = Signature.getInstance(ALGORITHM, provider); signature.initSign(keyPair.getPrivate()); signature.update(digest); return SignatureEncoding.fromAsn1Der(signature.sign(), algorithm); } @Override public boolean verify(byte[] digest, byte[] signature) throws GeneralSecurityException { Signature verify = Signature.getInstance(ALGORITHM, provider); checkDigestLength(digest); signature = SignatureEncoding.toAsn1Der(signature, algorithm); verify.initVerify(keyPair.getPublic()); verify.update(digest); return verify.verify(signature); } }
Yes, however, we want to ensure our code is LTS compatible. So, if spotbugs check fails for JDK 11, then it's better to fix it for JDK8 too. This is only to test that we get the same results in PR build as we get in post-merge CI builds.
private void checkDigestLength(byte[] digest) { if (digest.length != getDigestLength()) { throw new IllegalArgumentException("Invalid digest length."); } }
if (digest.length != getDigestLength()) {
private void checkDigestLength(byte[] digest) { if (digest.length != getDigestLength()) { throw new IllegalArgumentException("Invalid digest length."); } }
class EcdsaSignatureTransform implements ISignatureTransform { private static final String ALGORITHM = "NONEwithECDSA"; private final KeyPair keyPair; private final Provider provider; private final Ecdsa algorithm; EcdsaSignatureTransform(KeyPair keyPair, Provider provider, Ecdsa algorithm) { this.keyPair = keyPair; this.provider = provider; this.algorithm = algorithm; } @Override public byte[] sign(byte[] digest) throws GeneralSecurityException { checkDigestLength(digest); Signature signature = Signature.getInstance(ALGORITHM, provider); signature.initSign(keyPair.getPrivate()); signature.update(digest); return SignatureEncoding.fromAsn1Der(signature.sign(), algorithm); } @Override public boolean verify(byte[] digest, byte[] signature) throws GeneralSecurityException { Signature verify = Signature.getInstance(ALGORITHM, provider); checkDigestLength(digest); signature = SignatureEncoding.toAsn1Der(signature, algorithm); verify.initVerify(keyPair.getPublic()); verify.update(digest); return verify.verify(signature); } }
class EcdsaSignatureTransform implements ISignatureTransform { private static final String ALGORITHM = "NONEwithECDSA"; private final KeyPair keyPair; private final Provider provider; private final Ecdsa algorithm; EcdsaSignatureTransform(KeyPair keyPair, Provider provider, Ecdsa algorithm) { this.keyPair = keyPair; this.provider = provider; this.algorithm = algorithm; } @Override public byte[] sign(byte[] digest) throws GeneralSecurityException { checkDigestLength(digest); Signature signature = Signature.getInstance(ALGORITHM, provider); signature.initSign(keyPair.getPrivate()); signature.update(digest); return SignatureEncoding.fromAsn1Der(signature.sign(), algorithm); } @Override public boolean verify(byte[] digest, byte[] signature) throws GeneralSecurityException { Signature verify = Signature.getInstance(ALGORITHM, provider); checkDigestLength(digest); signature = SignatureEncoding.toAsn1Der(signature, algorithm); verify.initVerify(keyPair.getPublic()); verify.update(digest); return verify.verify(signature); } }
This is 'package-private', not 'protected'. This means it is not public API and effectively private.
Mono<VoidResponse> createWithResponse(Map<String, String> metadata, Context context) { return client.queues().createWithRestResponseAsync(queueName, null, metadata, null, context) .map(VoidResponse::new); }
}
Mono<VoidResponse> createWithResponse(Map<String, String> metadata, Context context) { return client.queues().createWithRestResponseAsync(queueName, null, metadata, null, context) .map(VoidResponse::new); }
class QueueAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(QueueAsyncClient.class); private final AzureQueueStorageImpl client; private final String queueName; /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code AzureQueueStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code AzureQueueStorageImpl client}. * * @param client Client that interacts with the service interfaces * @param queueName Name of the queue */ QueueAsyncClient(AzureQueueStorageImpl client, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(client.getHttpPipeline()) .url(client.getUrl()) .version(client.getVersion()) .build(); } /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code endpoint}. * Each service call goes through the {@code httpPipeline}. * * @param endpoint URL for the Storage Queue service * @param httpPipeline HttpPipeline that the HTTP requests and response flow through * @param queueName Name of the queue */ QueueAsyncClient(URL endpoint, HttpPipeline httpPipeline, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * @return the URL of the storage queue * @throws RuntimeException If the queue is using a malformed URL. */ public URL getQueueUrl() { try { return new URL(client.getUrl()); } catch (MalformedURLException ex) { LOGGER.error("Queue URL is malformed"); throw new RuntimeException("Queue URL is malformed"); } } /** * Creates a new queue. * * <p><strong>Code Samples</strong></p> * * <p>Create a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create} * * <p>For more information, see the * <a href="https: * * @return An empty response * @throws StorageErrorException If a queue with the same name already exists in the queue service. 
*/ public Mono<Void> create() { return createWithResponse(null).flatMap(FluxUtil::toMono); } /** * Creates a new queue. * * <p><strong>Code Samples</strong></p> * * <p>Create a queue with metadata "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If a queue with the same name and different metadata already exists in the queue service. */ public Mono<VoidResponse> createWithResponse(Map<String, String> metadata) { return withContext(context -> createWithResponse(metadata, context)); } /** * Permanently deletes the queue. * * <p><strong>Code Samples</strong></p> * * <p>Delete a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.delete} * * <p>For more information, see the * <a href="https: * * @return An empty response * @throws StorageErrorException If the queue doesn't exist */ public Mono<Void> delete() { return deleteWithResponse().flatMap(FluxUtil::toMono); } /** * Permanently deletes the queue. * * <p><strong>Code Samples</strong></p> * * <p>Delete a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> deleteWithResponse() { return withContext(context -> deleteWithResponse(context)); } Mono<VoidResponse> deleteWithResponse(Context context) { return client.queues().deleteWithRestResponseAsync(queueName, context) .map(VoidResponse::new); } /** * Retrieves metadata and approximate message count of the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Get the properties of the queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate * messages count of the queue. * @throws StorageErrorException If the queue doesn't exist */ public Mono<QueueProperties> getProperties() { return getPropertiesWithResponse().flatMap(FluxUtil::toMono); } /** * Retrieves metadata and approximate message count of the queue. * * <p><strong>Code Samples</strong></p> * * <p>Get the properties of the queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getPropertiesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate * messages count of the queue. * @throws StorageErrorException If the queue doesn't exist */ public Mono<Response<QueueProperties>> getPropertiesWithResponse() { return withContext(context -> getPropertiesWithResponse(context)); } Mono<Response<QueueProperties>> getPropertiesWithResponse(Context context) { return client.queues().getPropertiesWithRestResponseAsync(queueName, Context.NONE) .map(this::getQueuePropertiesResponse); } /** * Sets the metadata of the queue. * * Passing in a {@code null} value for metadata will clear the metadata associated with the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Set the queue's metadata to "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadata * * <p>Clear the queue's metadata</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<Void> setMetadata(Map<String, String> metadata) { return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono); } /** * Sets the metadata of the queue. * * Passing in a {@code null} value for metadata will clear the metadata associated with the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set the queue's metadata to "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadataWithResponse * * <p>Clear the queue's metadata</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> setMetadataWithResponse(Map<String, String> metadata) { return withContext(context -> setMetadataWithResponse(metadata, context)); } Mono<VoidResponse> setMetadataWithResponse(Map<String, String> metadata, Context context) { return client.queues().setMetadataWithRestResponseAsync(queueName, null, metadata, null, context) .map(VoidResponse::new); } /** * Retrieves stored access policies specified on the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>List the stored access policies</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getAccessPolicy} * * <p>For more information, see the * <a href="https: * * @return The stored access policies specified on the queue. * @throws StorageErrorException If the queue doesn't exist */ public Flux<SignedIdentifier> getAccessPolicy() { return client.queues().getAccessPolicyWithRestResponseAsync(queueName, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Sets stored access policies on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set a read only stored access policy</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setAccessPolicy} * * <p>For more information, see the * <a href="https: * * @param permissions Access policies to set on the queue * @return An empty response * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have all fields filled out, * or the queue will have more than five policies. */ public Mono<Void> setAccessPolicy(List<SignedIdentifier> permissions) { return setAccessPolicyWithResponse(permissions).flatMap(FluxUtil::toMono); } /** * Sets stored access policies on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set a read only stored access policy</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setAccessPolicyWithResponse} * * <p>For more information, see the * <a href="https: * * @param permissions Access policies to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have all fields filled out, * or the queue will have more than five policies. 
*/ public Mono<VoidResponse> setAccessPolicyWithResponse(List<SignedIdentifier> permissions) { return withContext(context -> setAccessPolicyWithResponse(permissions, context)); } Mono<VoidResponse> setAccessPolicyWithResponse(List<SignedIdentifier> permissions, Context context) { return client.queues().setAccessPolicyWithRestResponseAsync(queueName, permissions, null, null, context) .map(VoidResponse::new); } /** * Deletes all messages in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Clear the messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessages} * * <p>For more information, see the * <a href="https: * * @return An empty response * @throws StorageErrorException If the queue doesn't exist */ public Mono<Void> clearMessages() { return clearMessagesWithResponse().flatMap(FluxUtil::toMono); } /** * Deletes all messages in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Clear the messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessagesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> clearMessagesWithResponse() { return withContext(context -> clearMessagesWithResponse(context)); } Mono<VoidResponse> clearMessagesWithResponse(Context context) { return client.messages().clearWithRestResponseAsync(queueName, context) .map(VoidResponse::new); } /** * Enqueues a message that has a time-to-live of 7 days and is instantly visible. * * <p><strong>Code Samples</strong></p> * * <p>Enqueue a message of "Hello, Azure"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage * * <p>For more information, see the * <a href="https: * * @param messageText Message text * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. 
* @throws StorageErrorException If the queue doesn't exist */ public Mono<EnqueuedMessage> enqueueMessage(String messageText) { return enqueueMessageWithResponse(messageText, null, null).flatMap(FluxUtil::toMono); } /** * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Add a message of "Hello, Azure" that has a timeout of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessageWithResponse * * <p>Add a message of "Goodbye, Azure" that has a time to live of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessageWithResponseLiveTime * * <p>For more information, see the * <a href="https: * * @param messageText Message text * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0 * seconds and 7 days. * @param timeToLive Optional. How long the message will stay alive in the queue in seconds. If unset the value will * default to 7 days, if -1 is passed the message will not expire. The time to live must be -1 or any positive number. * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive} * are outside of the allowed limits. 
*/ public Mono<Response<EnqueuedMessage>> enqueueMessageWithResponse(String messageText, Duration visibilityTimeout, Duration timeToLive) { return withContext(context -> enqueueMessageWithResponse(messageText, visibilityTimeout, timeToLive, context)); } Mono<Response<EnqueuedMessage>> enqueueMessageWithResponse(String messageText, Duration visibilityTimeout, Duration timeToLive, Context context) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); Integer timeToLiveInSeconds = (timeToLive == null) ? null : (int) timeToLive.getSeconds(); QueueMessage message = new QueueMessage().messageText(messageText); return client.messages().enqueueWithRestResponseAsync(queueName, message, visibilityTimeoutInSeconds, timeToLiveInSeconds, null, null, context) .map(response -> new SimpleResponse<>(response, response.value().get(0))); } /** * Retrieves the first message in the queue and hides it from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue a message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages} * * <p>For more information, see the * <a href="https: * * @return The first {@link DequeuedMessage} in the queue, it contains * {@link DequeuedMessage * with the message, additionally it contains other metadata about the message. * @throws StorageErrorException If the queue doesn't exist */ public Flux<DequeuedMessage> dequeueMessages() { return dequeueMessages(1, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * <p>For more information, see the * <a href="https: * * @param maxMessages Optional. 
Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DequeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages) { return dequeueMessages(maxMessages, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for the * timeout period. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages and give them a 60 second timeout period</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * <p>For more information, see the * <a href="https: * * @param maxMessages Optional. Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If left empty the dequeued messages will be invisible for 30 seconds. The timeout must be between 1 second and 7 days. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DeqeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} or {@code visibilityTimeout} is * outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages, Duration visibilityTimeout) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); return client.messages().dequeueWithRestResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Peeks the first message in the queue. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages} * * <p>For more information, see the * <a href="https: * * @return A {@link PeekedMessage} that contains metadata about the message. */ public Flux<PeekedMessage> peekMessages() { return peekMessages(null); } /** * Peek messages from the front of the queue up to the maximum number of messages. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek up to the first five messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages * * <p>For more information, see the * <a href="https: * * @param maxMessages Optional. Maximum number of messages to peek, if there are less messages exist in the queue than requested * all the messages will be peeked. If left empty only 1 message will be peeked, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link PeekedMessage PeekedMessages} from the queue. Each PeekedMessage contains * metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<PeekedMessage> peekMessages(Integer maxMessages) { return client.messages().peekWithRestResponseAsync(queueName, maxMessages, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Updates the specific message in the queue with a new message and resets the visibility timeout. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.updateMessage} * * <p>For more information, see the * <a href="https: * * @param messageText Updated value for the message * @param messageId Id of the message to update * @param popReceipt Unique identifier that must match for the message to be updated * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The * timeout period must be between 1 second and 7 days. * @return A {@link UpdatedMessage} that contains the new {@link UpdatedMessage * with the message, additionally contains the updated metadata about the message. * @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message, * or the {@code visibilityTimeout} is outside the allowed bounds */ public Mono<UpdatedMessage> updateMessage(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) { return updateMessageWithResponse(messageText, messageId, popReceipt, visibilityTimeout).flatMap(FluxUtil::toMono); } /** * Updates the specific message in the queue with a new message and resets the visibility timeout. 
* * <p><strong>Code Samples</strong></p> * * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.updateMessageWithResponse} * * <p>For more information, see the * <a href="https: * * @param messageText Updated value for the message * @param messageId Id of the message to update * @param popReceipt Unique identifier that must match for the message to be updated * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The * timeout period must be between 1 second and 7 days. * @return A {@link UpdatedMessage} that contains the new {@link UpdatedMessage * with the message, additionally contains the updated metadata about the message. * @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message, * or the {@code visibilityTimeout} is outside the allowed bounds */ public Mono<Response<UpdatedMessage>> updateMessageWithResponse(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) { return withContext(context -> updateMessageWithResponse(messageText, messageId, popReceipt, visibilityTimeout, context)); } Mono<Response<UpdatedMessage>> updateMessageWithResponse(String messageText, String messageId, String popReceipt, Duration visibilityTimeout, Context context) { QueueMessage message = new QueueMessage().messageText(messageText); return client.messageIds().updateWithRestResponseAsync(queueName, messageId, message, popReceipt, (int) visibilityTimeout.getSeconds(), context) .map(this::getUpdatedMessageResponse); } /** * Deletes the specified message in the queue * * <p><strong>Code Samples</strong></p> * * <p>Delete the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteMessage} * * <p>For more information, see the * <a href="https: * * @param messageId Id of the message to deleted * @param popReceipt 
Unique identifier that must match for the message to be deleted * @return An empty response * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message */ public Mono<Void> deleteMessage(String messageId, String popReceipt) { return deleteMessageWithResponse(messageId, popReceipt).flatMap(FluxUtil::toMono); } /** * Deletes the specified message in the queue * * <p><strong>Code Samples</strong></p> * * <p>Delete the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteMessageWithResponse} * * <p>For more information, see the * <a href="https: * * @param messageId Id of the message to deleted * @param popReceipt Unique identifier that must match for the message to be deleted * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message */ public Mono<VoidResponse> deleteMessageWithResponse(String messageId, String popReceipt) { return withContext(context -> deleteMessageWithResponse(messageId, popReceipt, context)); } Mono<VoidResponse> deleteMessageWithResponse(String messageId, String popReceipt, Context context) { return client.messageIds().deleteWithRestResponseAsync(queueName, messageId, popReceipt, context) .map(VoidResponse::new); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) { QueueGetPropertiesHeaders propertiesHeaders = response.deserializedHeaders(); QueueProperties properties = new QueueProperties(propertiesHeaders.metadata(), propertiesHeaders.approximateMessagesCount()); return new SimpleResponse<>(response, properties); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * 
@return Mapped response */ private Response<UpdatedMessage> getUpdatedMessageResponse(MessageIdsUpdateResponse response) { MessageIdUpdateHeaders headers = response.deserializedHeaders(); UpdatedMessage updatedMessage = new UpdatedMessage(headers.popReceipt(), headers.timeNextVisible()); return new SimpleResponse<>(response, updatedMessage); } }
class QueueAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(QueueAsyncClient.class); private final AzureQueueStorageImpl client; private final String queueName; /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code AzureQueueStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code AzureQueueStorageImpl client}. * * @param client Client that interacts with the service interfaces * @param queueName Name of the queue */ QueueAsyncClient(AzureQueueStorageImpl client, String queueName) { Objects.requireNonNull(queueName); this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(client.getHttpPipeline()) .url(client.getUrl()) .version(client.getVersion()) .build(); } /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code endpoint}. * Each service call goes through the {@code httpPipeline}. * * @param endpoint URL for the Storage Queue service * @param httpPipeline HttpPipeline that the HTTP requests and response flow through * @param queueName Name of the queue */ QueueAsyncClient(URL endpoint, HttpPipeline httpPipeline, String queueName) { Objects.requireNonNull(queueName); this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * @return the URL of the storage queue * @throws RuntimeException If the queue is using a malformed URL. */ public URL getQueueUrl() { try { return new URL(client.getUrl()); } catch (MalformedURLException ex) { LOGGER.error("Queue URL is malformed"); throw new RuntimeException("Queue URL is malformed"); } } /** * Creates a new queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Create a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create} * * <p>For more information, see the * <a href="https: * * @return An empty response * @throws StorageErrorException If a queue with the same name already exists in the queue service. */ public Mono<Void> create() { return createWithResponse(null).flatMap(FluxUtil::toMono); } /** * Creates a new queue. * * <p><strong>Code Samples</strong></p> * * <p>Create a queue with metadata "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If a queue with the same name and different metadata already exists in the queue service. */ public Mono<VoidResponse> createWithResponse(Map<String, String> metadata) { return withContext(context -> createWithResponse(metadata, context)); } /** * Permanently deletes the queue. * * <p><strong>Code Samples</strong></p> * * <p>Delete a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.delete} * * <p>For more information, see the * <a href="https: * * @return An empty response * @throws StorageErrorException If the queue doesn't exist */ public Mono<Void> delete() { return deleteWithResponse().flatMap(FluxUtil::toMono); } /** * Permanently deletes the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Delete a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> deleteWithResponse() { return withContext(context -> deleteWithResponse(context)); } Mono<VoidResponse> deleteWithResponse(Context context) { return client.queues().deleteWithRestResponseAsync(queueName, context) .map(VoidResponse::new); } /** * Retrieves metadata and approximate message count of the queue. * * <p><strong>Code Samples</strong></p> * * <p>Get the properties of the queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate * messages count of the queue. * @throws StorageErrorException If the queue doesn't exist */ public Mono<QueueProperties> getProperties() { return getPropertiesWithResponse().flatMap(FluxUtil::toMono); } /** * Retrieves metadata and approximate message count of the queue. * * <p><strong>Code Samples</strong></p> * * <p>Get the properties of the queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getPropertiesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate * messages count of the queue. 
* @throws StorageErrorException If the queue doesn't exist */ public Mono<Response<QueueProperties>> getPropertiesWithResponse() { return withContext(context -> getPropertiesWithResponse(context)); } Mono<Response<QueueProperties>> getPropertiesWithResponse(Context context) { return client.queues().getPropertiesWithRestResponseAsync(queueName, Context.NONE) .map(this::getQueuePropertiesResponse); } /** * Sets the metadata of the queue. * * Passing in a {@code null} value for metadata will clear the metadata associated with the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set the queue's metadata to "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadata * * <p>Clear the queue's metadata</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<Void> setMetadata(Map<String, String> metadata) { return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono); } /** * Sets the metadata of the queue. * * Passing in a {@code null} value for metadata will clear the metadata associated with the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Set the queue's metadata to "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadataWithResponse * * <p>Clear the queue's metadata</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> setMetadataWithResponse(Map<String, String> metadata) { return withContext(context -> setMetadataWithResponse(metadata, context)); } Mono<VoidResponse> setMetadataWithResponse(Map<String, String> metadata, Context context) { return client.queues().setMetadataWithRestResponseAsync(queueName, null, metadata, null, context) .map(VoidResponse::new); } /** * Retrieves stored access policies specified on the queue. * * <p><strong>Code Samples</strong></p> * * <p>List the stored access policies</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getAccessPolicy} * * <p>For more information, see the * <a href="https: * * @return The stored access policies specified on the queue. * @throws StorageErrorException If the queue doesn't exist */ public Flux<SignedIdentifier> getAccessPolicy() { return client.queues().getAccessPolicyWithRestResponseAsync(queueName, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Sets stored access policies on the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Set a read only stored access policy</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setAccessPolicy} * * <p>For more information, see the * <a href="https: * * @param permissions Access policies to set on the queue * @return An empty response * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have all fields filled out, * or the queue will have more than five policies. */ public Mono<Void> setAccessPolicy(List<SignedIdentifier> permissions) { return setAccessPolicyWithResponse(permissions).flatMap(FluxUtil::toMono); } /** * Sets stored access policies on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set a read only stored access policy</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setAccessPolicyWithResponse} * * <p>For more information, see the * <a href="https: * * @param permissions Access policies to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have all fields filled out, * or the queue will have more than five policies. */ public Mono<VoidResponse> setAccessPolicyWithResponse(List<SignedIdentifier> permissions) { return withContext(context -> setAccessPolicyWithResponse(permissions, context)); } Mono<VoidResponse> setAccessPolicyWithResponse(List<SignedIdentifier> permissions, Context context) { return client.queues().setAccessPolicyWithRestResponseAsync(queueName, permissions, null, null, context) .map(VoidResponse::new); } /** * Deletes all messages in the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Clear the messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessages} * * <p>For more information, see the * <a href="https: * * @return An empty response * @throws StorageErrorException If the queue doesn't exist */ public Mono<Void> clearMessages() { return clearMessagesWithResponse().flatMap(FluxUtil::toMono); } /** * Deletes all messages in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Clear the messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessagesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> clearMessagesWithResponse() { return withContext(context -> clearMessagesWithResponse(context)); } Mono<VoidResponse> clearMessagesWithResponse(Context context) { return client.messages().clearWithRestResponseAsync(queueName, context) .map(VoidResponse::new); } /** * Enqueues a message that has a time-to-live of 7 days and is instantly visible. * * <p><strong>Code Samples</strong></p> * * <p>Enqueue a message of "Hello, Azure"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage * * <p>For more information, see the * <a href="https: * * @param messageText Message text * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist */ public Mono<EnqueuedMessage> enqueueMessage(String messageText) { return enqueueMessageWithResponse(messageText, null, null).flatMap(FluxUtil::toMono); } /** * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Add a message of "Hello, Azure" that has a timeout of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessageWithResponse * * <p>Add a message of "Goodbye, Azure" that has a time to live of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessageWithResponseLiveTime * * <p>For more information, see the * <a href="https: * * @param messageText Message text * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0 * seconds and 7 days. * @param timeToLive Optional. How long the message will stay alive in the queue in seconds. If unset the value will * default to 7 days, if -1 is passed the message will not expire. The time to live must be -1 or any positive number. * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive} * are outside of the allowed limits. */ public Mono<Response<EnqueuedMessage>> enqueueMessageWithResponse(String messageText, Duration visibilityTimeout, Duration timeToLive) { return withContext(context -> enqueueMessageWithResponse(messageText, visibilityTimeout, timeToLive, context)); } Mono<Response<EnqueuedMessage>> enqueueMessageWithResponse(String messageText, Duration visibilityTimeout, Duration timeToLive, Context context) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); Integer timeToLiveInSeconds = (timeToLive == null) ? 
null : (int) timeToLive.getSeconds(); QueueMessage message = new QueueMessage().messageText(messageText); return client.messages().enqueueWithRestResponseAsync(queueName, message, visibilityTimeoutInSeconds, timeToLiveInSeconds, null, null, context) .map(response -> new SimpleResponse<>(response, response.value().get(0))); } /** * Retrieves the first message in the queue and hides it from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue a message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages} * * <p>For more information, see the * <a href="https: * * @return The first {@link DequeuedMessage} in the queue, it contains * {@link DequeuedMessage * with the message, additionally it contains other metadata about the message. * @throws StorageErrorException If the queue doesn't exist */ public Flux<DequeuedMessage> dequeueMessages() { return dequeueMessages(1, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * <p>For more information, see the * <a href="https: * * @param maxMessages Optional. Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DequeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages) { return dequeueMessages(maxMessages, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for the * timeout period. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages and give them a 60 second timeout period</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * <p>For more information, see the * <a href="https: * * @param maxMessages Optional. Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If left empty the dequeued messages will be invisible for 30 seconds. The timeout must be between 1 second and 7 days. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DeqeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} or {@code visibilityTimeout} is * outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages, Duration visibilityTimeout) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); return client.messages().dequeueWithRestResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Peeks the first message in the queue. 
* * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages} * * <p>For more information, see the * <a href="https: * * @return A {@link PeekedMessage} that contains metadata about the message. */ public Flux<PeekedMessage> peekMessages() { return peekMessages(null); } /** * Peek messages from the front of the queue up to the maximum number of messages. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek up to the first five messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages * * <p>For more information, see the * <a href="https: * * @param maxMessages Optional. Maximum number of messages to peek, if there are less messages exist in the queue than requested * all the messages will be peeked. If left empty only 1 message will be peeked, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link PeekedMessage PeekedMessages} from the queue. Each PeekedMessage contains * metadata about the message. * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<PeekedMessage> peekMessages(Integer maxMessages) { return client.messages().peekWithRestResponseAsync(queueName, maxMessages, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Updates the specific message in the queue with a new message and resets the visibility timeout. 
* * <p><strong>Code Samples</strong></p> * * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.updateMessage} * * <p>For more information, see the * <a href="https: * * @param messageText Updated value for the message * @param messageId Id of the message to update * @param popReceipt Unique identifier that must match for the message to be updated * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The * timeout period must be between 1 second and 7 days. * @return A {@link UpdatedMessage} that contains the new {@link UpdatedMessage * with the message, additionally contains the updated metadata about the message. * @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message, * or the {@code visibilityTimeout} is outside the allowed bounds */ public Mono<UpdatedMessage> updateMessage(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) { return updateMessageWithResponse(messageText, messageId, popReceipt, visibilityTimeout).flatMap(FluxUtil::toMono); } /** * Updates the specific message in the queue with a new message and resets the visibility timeout. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.updateMessageWithResponse} * * <p>For more information, see the * <a href="https: * * @param messageText Updated value for the message * @param messageId Id of the message to update * @param popReceipt Unique identifier that must match for the message to be updated * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The * timeout period must be between 1 second and 7 days. 
* @return A {@link UpdatedMessage} that contains the new {@link UpdatedMessage * with the message, additionally contains the updated metadata about the message. * @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message, * or the {@code visibilityTimeout} is outside the allowed bounds */ public Mono<Response<UpdatedMessage>> updateMessageWithResponse(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) { return withContext(context -> updateMessageWithResponse(messageText, messageId, popReceipt, visibilityTimeout, context)); } Mono<Response<UpdatedMessage>> updateMessageWithResponse(String messageText, String messageId, String popReceipt, Duration visibilityTimeout, Context context) { QueueMessage message = new QueueMessage().messageText(messageText); return client.messageIds().updateWithRestResponseAsync(queueName, messageId, message, popReceipt, (int) visibilityTimeout.getSeconds(), context) .map(this::getUpdatedMessageResponse); } /** * Deletes the specified message in the queue * * <p><strong>Code Samples</strong></p> * * <p>Delete the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteMessage} * * <p>For more information, see the * <a href="https: * * @param messageId Id of the message to deleted * @param popReceipt Unique identifier that must match for the message to be deleted * @return An empty response * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message */ public Mono<Void> deleteMessage(String messageId, String popReceipt) { return deleteMessageWithResponse(messageId, popReceipt).flatMap(FluxUtil::toMono); } /** * Deletes the specified message in the queue * * <p><strong>Code Samples</strong></p> * * <p>Delete the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteMessageWithResponse} * * <p>For more information, see the * <a href="https: * * 
@param messageId Id of the message to deleted * @param popReceipt Unique identifier that must match for the message to be deleted * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message */ public Mono<VoidResponse> deleteMessageWithResponse(String messageId, String popReceipt) { return withContext(context -> deleteMessageWithResponse(messageId, popReceipt, context)); } Mono<VoidResponse> deleteMessageWithResponse(String messageId, String popReceipt, Context context) { return client.messageIds().deleteWithRestResponseAsync(queueName, messageId, popReceipt, context) .map(VoidResponse::new); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) { QueueGetPropertiesHeaders propertiesHeaders = response.deserializedHeaders(); QueueProperties properties = new QueueProperties(propertiesHeaders.metadata(), propertiesHeaders.approximateMessagesCount()); return new SimpleResponse<>(response, properties); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private Response<UpdatedMessage> getUpdatedMessageResponse(MessageIdsUpdateResponse response) { MessageIdUpdateHeaders headers = response.deserializedHeaders(); UpdatedMessage updatedMessage = new UpdatedMessage(headers.popReceipt(), headers.timeNextVisible()); return new SimpleResponse<>(response, updatedMessage); } }
Looks like the PR build failed due to spotbugs. So, the fix is working!
private void checkDigestLength(byte[] digest) { if (digest.length != getDigestLength()) { throw new IllegalArgumentException("Invalid digest length."); } }
if (digest.length != getDigestLength()) {
private void checkDigestLength(byte[] digest) { if (digest.length != getDigestLength()) { throw new IllegalArgumentException("Invalid digest length."); } }
class EcdsaSignatureTransform implements ISignatureTransform { private static final String ALGORITHM = "NONEwithECDSA"; private final KeyPair keyPair; private final Provider provider; private final Ecdsa algorithm; EcdsaSignatureTransform(KeyPair keyPair, Provider provider, Ecdsa algorithm) { this.keyPair = keyPair; this.provider = provider; this.algorithm = algorithm; } @Override public byte[] sign(byte[] digest) throws GeneralSecurityException { checkDigestLength(digest); Signature signature = Signature.getInstance(ALGORITHM, provider); signature.initSign(keyPair.getPrivate()); signature.update(digest); return SignatureEncoding.fromAsn1Der(signature.sign(), algorithm); } @Override public boolean verify(byte[] digest, byte[] signature) throws GeneralSecurityException { Signature verify = Signature.getInstance(ALGORITHM, provider); checkDigestLength(digest); signature = SignatureEncoding.toAsn1Der(signature, algorithm); verify.initVerify(keyPair.getPublic()); verify.update(digest); return verify.verify(signature); } }
class EcdsaSignatureTransform implements ISignatureTransform { private static final String ALGORITHM = "NONEwithECDSA"; private final KeyPair keyPair; private final Provider provider; private final Ecdsa algorithm; EcdsaSignatureTransform(KeyPair keyPair, Provider provider, Ecdsa algorithm) { this.keyPair = keyPair; this.provider = provider; this.algorithm = algorithm; } @Override public byte[] sign(byte[] digest) throws GeneralSecurityException { checkDigestLength(digest); Signature signature = Signature.getInstance(ALGORITHM, provider); signature.initSign(keyPair.getPrivate()); signature.update(digest); return SignatureEncoding.fromAsn1Der(signature.sign(), algorithm); } @Override public boolean verify(byte[] digest, byte[] signature) throws GeneralSecurityException { Signature verify = Signature.getInstance(ALGORITHM, provider); checkDigestLength(digest); signature = SignatureEncoding.toAsn1Der(signature, algorithm); verify.initVerify(keyPair.getPublic()); verify.update(digest); return verify.verify(signature); } }
Yeah, I agree, this is a good idea to enable this in PR builds. It would be good to catch false positives at this stage itself and either refactor to fix or suppress them, whichever is the best option.
private void checkDigestLength(byte[] digest) { if (digest.length != getDigestLength()) { throw new IllegalArgumentException("Invalid digest length."); } }
if (digest.length != getDigestLength()) {
private void checkDigestLength(byte[] digest) { if (digest.length != getDigestLength()) { throw new IllegalArgumentException("Invalid digest length."); } }
class EcdsaSignatureTransform implements ISignatureTransform { private static final String ALGORITHM = "NONEwithECDSA"; private final KeyPair keyPair; private final Provider provider; private final Ecdsa algorithm; EcdsaSignatureTransform(KeyPair keyPair, Provider provider, Ecdsa algorithm) { this.keyPair = keyPair; this.provider = provider; this.algorithm = algorithm; } @Override public byte[] sign(byte[] digest) throws GeneralSecurityException { checkDigestLength(digest); Signature signature = Signature.getInstance(ALGORITHM, provider); signature.initSign(keyPair.getPrivate()); signature.update(digest); return SignatureEncoding.fromAsn1Der(signature.sign(), algorithm); } @Override public boolean verify(byte[] digest, byte[] signature) throws GeneralSecurityException { Signature verify = Signature.getInstance(ALGORITHM, provider); checkDigestLength(digest); signature = SignatureEncoding.toAsn1Der(signature, algorithm); verify.initVerify(keyPair.getPublic()); verify.update(digest); return verify.verify(signature); } }
class EcdsaSignatureTransform implements ISignatureTransform { private static final String ALGORITHM = "NONEwithECDSA"; private final KeyPair keyPair; private final Provider provider; private final Ecdsa algorithm; EcdsaSignatureTransform(KeyPair keyPair, Provider provider, Ecdsa algorithm) { this.keyPair = keyPair; this.provider = provider; this.algorithm = algorithm; } @Override public byte[] sign(byte[] digest) throws GeneralSecurityException { checkDigestLength(digest); Signature signature = Signature.getInstance(ALGORITHM, provider); signature.initSign(keyPair.getPrivate()); signature.update(digest); return SignatureEncoding.fromAsn1Der(signature.sign(), algorithm); } @Override public boolean verify(byte[] digest, byte[] signature) throws GeneralSecurityException { Signature verify = Signature.getInstance(ALGORITHM, provider); checkDigestLength(digest); signature = SignatureEncoding.toAsn1Der(signature, algorithm); verify.initVerify(keyPair.getPublic()); verify.update(digest); return verify.verify(signature); } }
I don't quite understand why we make the API protected. If there is no need to have context in the async one, can we make it private?
Mono<VoidResponse> createWithResponse(Map<String, String> metadata, Context context) { return client.queues().createWithRestResponseAsync(queueName, null, metadata, null, context) .map(VoidResponse::new); }
}
Mono<VoidResponse> createWithResponse(Map<String, String> metadata, Context context) { return client.queues().createWithRestResponseAsync(queueName, null, metadata, null, context) .map(VoidResponse::new); }
class QueueAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(QueueAsyncClient.class); private final AzureQueueStorageImpl client; private final String queueName; /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code AzureQueueStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code AzureQueueStorageImpl client}. * * @param client Client that interacts with the service interfaces * @param queueName Name of the queue */ QueueAsyncClient(AzureQueueStorageImpl client, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(client.getHttpPipeline()) .url(client.getUrl()) .version(client.getVersion()) .build(); } /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code endpoint}. * Each service call goes through the {@code httpPipeline}. * * @param endpoint URL for the Storage Queue service * @param httpPipeline HttpPipeline that the HTTP requests and response flow through * @param queueName Name of the queue */ QueueAsyncClient(URL endpoint, HttpPipeline httpPipeline, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * @return the URL of the storage queue * @throws RuntimeException If the queue is using a malformed URL. */ public URL getQueueUrl() { try { return new URL(client.getUrl()); } catch (MalformedURLException ex) { LOGGER.error("Queue URL is malformed"); throw new RuntimeException("Queue URL is malformed"); } } /** * Creates a new queue. * * <p><strong>Code Samples</strong></p> * * <p>Create a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create} * * <p>For more information, see the * <a href="https: * * @return An empty response * @throws StorageErrorException If a queue with the same name already exists in the queue service. 
*/ public Mono<Void> create() { return createWithResponse(null).flatMap(FluxUtil::toMono); } /** * Creates a new queue. * * <p><strong>Code Samples</strong></p> * * <p>Create a queue with metadata "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If a queue with the same name and different metadata already exists in the queue service. */ public Mono<VoidResponse> createWithResponse(Map<String, String> metadata) { return withContext(context -> createWithResponse(metadata, context)); } /** * Permanently deletes the queue. * * <p><strong>Code Samples</strong></p> * * <p>Delete a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.delete} * * <p>For more information, see the * <a href="https: * * @return An empty response * @throws StorageErrorException If the queue doesn't exist */ public Mono<Void> delete() { return deleteWithResponse().flatMap(FluxUtil::toMono); } /** * Permanently deletes the queue. * * <p><strong>Code Samples</strong></p> * * <p>Delete a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> deleteWithResponse() { return withContext(context -> deleteWithResponse(context)); } Mono<VoidResponse> deleteWithResponse(Context context) { return client.queues().deleteWithRestResponseAsync(queueName, context) .map(VoidResponse::new); } /** * Retrieves metadata and approximate message count of the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Get the properties of the queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate * messages count of the queue. * @throws StorageErrorException If the queue doesn't exist */ public Mono<QueueProperties> getProperties() { return getPropertiesWithResponse().flatMap(FluxUtil::toMono); } /** * Retrieves metadata and approximate message count of the queue. * * <p><strong>Code Samples</strong></p> * * <p>Get the properties of the queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getPropertiesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate * messages count of the queue. * @throws StorageErrorException If the queue doesn't exist */ public Mono<Response<QueueProperties>> getPropertiesWithResponse() { return withContext(context -> getPropertiesWithResponse(context)); } Mono<Response<QueueProperties>> getPropertiesWithResponse(Context context) { return client.queues().getPropertiesWithRestResponseAsync(queueName, Context.NONE) .map(this::getQueuePropertiesResponse); } /** * Sets the metadata of the queue. * * Passing in a {@code null} value for metadata will clear the metadata associated with the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Set the queue's metadata to "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadata * * <p>Clear the queue's metadata</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<Void> setMetadata(Map<String, String> metadata) { return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono); } /** * Sets the metadata of the queue. * * Passing in a {@code null} value for metadata will clear the metadata associated with the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set the queue's metadata to "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadataWithResponse * * <p>Clear the queue's metadata</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> setMetadataWithResponse(Map<String, String> metadata) { return withContext(context -> setMetadataWithResponse(metadata, context)); } Mono<VoidResponse> setMetadataWithResponse(Map<String, String> metadata, Context context) { return client.queues().setMetadataWithRestResponseAsync(queueName, null, metadata, null, context) .map(VoidResponse::new); } /** * Retrieves stored access policies specified on the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>List the stored access policies</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getAccessPolicy} * * <p>For more information, see the * <a href="https: * * @return The stored access policies specified on the queue. * @throws StorageErrorException If the queue doesn't exist */ public Flux<SignedIdentifier> getAccessPolicy() { return client.queues().getAccessPolicyWithRestResponseAsync(queueName, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Sets stored access policies on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set a read only stored access policy</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setAccessPolicy} * * <p>For more information, see the * <a href="https: * * @param permissions Access policies to set on the queue * @return An empty response * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have all fields filled out, * or the queue will have more than five policies. */ public Mono<Void> setAccessPolicy(List<SignedIdentifier> permissions) { return setAccessPolicyWithResponse(permissions).flatMap(FluxUtil::toMono); } /** * Sets stored access policies on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set a read only stored access policy</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setAccessPolicyWithResponse} * * <p>For more information, see the * <a href="https: * * @param permissions Access policies to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have all fields filled out, * or the queue will have more than five policies. 
*/ public Mono<VoidResponse> setAccessPolicyWithResponse(List<SignedIdentifier> permissions) { return withContext(context -> setAccessPolicyWithResponse(permissions, context)); } Mono<VoidResponse> setAccessPolicyWithResponse(List<SignedIdentifier> permissions, Context context) { return client.queues().setAccessPolicyWithRestResponseAsync(queueName, permissions, null, null, context) .map(VoidResponse::new); } /** * Deletes all messages in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Clear the messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessages} * * <p>For more information, see the * <a href="https: * * @return An empty response * @throws StorageErrorException If the queue doesn't exist */ public Mono<Void> clearMessages() { return clearMessagesWithResponse().flatMap(FluxUtil::toMono); } /** * Deletes all messages in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Clear the messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessagesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> clearMessagesWithResponse() { return withContext(context -> clearMessagesWithResponse(context)); } Mono<VoidResponse> clearMessagesWithResponse(Context context) { return client.messages().clearWithRestResponseAsync(queueName, context) .map(VoidResponse::new); } /** * Enqueues a message that has a time-to-live of 7 days and is instantly visible. * * <p><strong>Code Samples</strong></p> * * <p>Enqueue a message of "Hello, Azure"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage * * <p>For more information, see the * <a href="https: * * @param messageText Message text * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. 
* @throws StorageErrorException If the queue doesn't exist */ public Mono<EnqueuedMessage> enqueueMessage(String messageText) { return enqueueMessageWithResponse(messageText, null, null).flatMap(FluxUtil::toMono); } /** * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Add a message of "Hello, Azure" that has a timeout of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessageWithResponse * * <p>Add a message of "Goodbye, Azure" that has a time to live of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessageWithResponseLiveTime * * <p>For more information, see the * <a href="https: * * @param messageText Message text * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0 * seconds and 7 days. * @param timeToLive Optional. How long the message will stay alive in the queue in seconds. If unset the value will * default to 7 days, if -1 is passed the message will not expire. The time to live must be -1 or any positive number. * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive} * are outside of the allowed limits. 
*/ public Mono<Response<EnqueuedMessage>> enqueueMessageWithResponse(String messageText, Duration visibilityTimeout, Duration timeToLive) { return withContext(context -> enqueueMessageWithResponse(messageText, visibilityTimeout, timeToLive, context)); } Mono<Response<EnqueuedMessage>> enqueueMessageWithResponse(String messageText, Duration visibilityTimeout, Duration timeToLive, Context context) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); Integer timeToLiveInSeconds = (timeToLive == null) ? null : (int) timeToLive.getSeconds(); QueueMessage message = new QueueMessage().messageText(messageText); return client.messages().enqueueWithRestResponseAsync(queueName, message, visibilityTimeoutInSeconds, timeToLiveInSeconds, null, null, context) .map(response -> new SimpleResponse<>(response, response.value().get(0))); } /** * Retrieves the first message in the queue and hides it from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue a message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages} * * <p>For more information, see the * <a href="https: * * @return The first {@link DequeuedMessage} in the queue, it contains * {@link DequeuedMessage * with the message, additionally it contains other metadata about the message. * @throws StorageErrorException If the queue doesn't exist */ public Flux<DequeuedMessage> dequeueMessages() { return dequeueMessages(1, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * <p>For more information, see the * <a href="https: * * @param maxMessages Optional. 
Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DequeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages) { return dequeueMessages(maxMessages, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for the * timeout period. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages and give them a 60 second timeout period</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * <p>For more information, see the * <a href="https: * * @param maxMessages Optional. Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If left empty the dequeued messages will be invisible for 30 seconds. The timeout must be between 1 second and 7 days. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DeqeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} or {@code visibilityTimeout} is * outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages, Duration visibilityTimeout) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); return client.messages().dequeueWithRestResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Peeks the first message in the queue. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages} * * <p>For more information, see the * <a href="https: * * @return A {@link PeekedMessage} that contains metadata about the message. */ public Flux<PeekedMessage> peekMessages() { return peekMessages(null); } /** * Peek messages from the front of the queue up to the maximum number of messages. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek up to the first five messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages * * <p>For more information, see the * <a href="https: * * @param maxMessages Optional. Maximum number of messages to peek, if there are less messages exist in the queue than requested * all the messages will be peeked. If left empty only 1 message will be peeked, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link PeekedMessage PeekedMessages} from the queue. Each PeekedMessage contains * metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<PeekedMessage> peekMessages(Integer maxMessages) { return client.messages().peekWithRestResponseAsync(queueName, maxMessages, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Updates the specific message in the queue with a new message and resets the visibility timeout. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.updateMessage} * * <p>For more information, see the * <a href="https: * * @param messageText Updated value for the message * @param messageId Id of the message to update * @param popReceipt Unique identifier that must match for the message to be updated * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The * timeout period must be between 1 second and 7 days. * @return A {@link UpdatedMessage} that contains the new {@link UpdatedMessage * with the message, additionally contains the updated metadata about the message. * @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message, * or the {@code visibilityTimeout} is outside the allowed bounds */ public Mono<UpdatedMessage> updateMessage(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) { return updateMessageWithResponse(messageText, messageId, popReceipt, visibilityTimeout).flatMap(FluxUtil::toMono); } /** * Updates the specific message in the queue with a new message and resets the visibility timeout. 
* * <p><strong>Code Samples</strong></p> * * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.updateMessageWithResponse} * * <p>For more information, see the * <a href="https: * * @param messageText Updated value for the message * @param messageId Id of the message to update * @param popReceipt Unique identifier that must match for the message to be updated * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The * timeout period must be between 1 second and 7 days. * @return A {@link UpdatedMessage} that contains the new {@link UpdatedMessage * with the message, additionally contains the updated metadata about the message. * @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message, * or the {@code visibilityTimeout} is outside the allowed bounds */ public Mono<Response<UpdatedMessage>> updateMessageWithResponse(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) { return withContext(context -> updateMessageWithResponse(messageText, messageId, popReceipt, visibilityTimeout, context)); } Mono<Response<UpdatedMessage>> updateMessageWithResponse(String messageText, String messageId, String popReceipt, Duration visibilityTimeout, Context context) { QueueMessage message = new QueueMessage().messageText(messageText); return client.messageIds().updateWithRestResponseAsync(queueName, messageId, message, popReceipt, (int) visibilityTimeout.getSeconds(), context) .map(this::getUpdatedMessageResponse); } /** * Deletes the specified message in the queue * * <p><strong>Code Samples</strong></p> * * <p>Delete the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteMessage} * * <p>For more information, see the * <a href="https: * * @param messageId Id of the message to deleted * @param popReceipt 
Unique identifier that must match for the message to be deleted * @return An empty response * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message */ public Mono<Void> deleteMessage(String messageId, String popReceipt) { return deleteMessageWithResponse(messageId, popReceipt).flatMap(FluxUtil::toMono); } /** * Deletes the specified message in the queue * * <p><strong>Code Samples</strong></p> * * <p>Delete the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteMessageWithResponse} * * <p>For more information, see the * <a href="https: * * @param messageId Id of the message to deleted * @param popReceipt Unique identifier that must match for the message to be deleted * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message */ public Mono<VoidResponse> deleteMessageWithResponse(String messageId, String popReceipt) { return withContext(context -> deleteMessageWithResponse(messageId, popReceipt, context)); } Mono<VoidResponse> deleteMessageWithResponse(String messageId, String popReceipt, Context context) { return client.messageIds().deleteWithRestResponseAsync(queueName, messageId, popReceipt, context) .map(VoidResponse::new); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) { QueueGetPropertiesHeaders propertiesHeaders = response.deserializedHeaders(); QueueProperties properties = new QueueProperties(propertiesHeaders.metadata(), propertiesHeaders.approximateMessagesCount()); return new SimpleResponse<>(response, properties); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * 
@return Mapped response */ private Response<UpdatedMessage> getUpdatedMessageResponse(MessageIdsUpdateResponse response) { MessageIdUpdateHeaders headers = response.deserializedHeaders(); UpdatedMessage updatedMessage = new UpdatedMessage(headers.popReceipt(), headers.timeNextVisible()); return new SimpleResponse<>(response, updatedMessage); } }
class QueueAsyncClient {
    private static final ClientLogger LOGGER = new ClientLogger(QueueAsyncClient.class);
    private final AzureQueueStorageImpl client;
    private final String queueName;

    /**
     * Creates a QueueAsyncClient that sends requests to the storage queue service.
     * Each service call goes through the {@link HttpPipeline pipeline} in the {@code AzureQueueStorageImpl client}.
     *
     * @param client Client that interacts with the service interfaces
     * @param queueName Name of the queue
     */
    QueueAsyncClient(AzureQueueStorageImpl client, String queueName) {
        Objects.requireNonNull(queueName);
        this.queueName = queueName;
        this.client = new AzureQueueStorageBuilder().pipeline(client.getHttpPipeline())
            .url(client.getUrl())
            .version(client.getVersion())
            .build();
    }

    /**
     * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code endpoint}.
     * Each service call goes through the {@code httpPipeline}.
     *
     * @param endpoint URL for the Storage Queue service
     * @param httpPipeline HttpPipeline that the HTTP requests and responses flow through
     * @param queueName Name of the queue
     */
    QueueAsyncClient(URL endpoint, HttpPipeline httpPipeline, String queueName) {
        Objects.requireNonNull(queueName);
        this.queueName = queueName;
        this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline)
            .url(endpoint.toString())
            .build();
    }

    /**
     * @return the URL of the storage queue
     * @throws RuntimeException If the queue is using a malformed URL.
     */
    public URL getQueueUrl() {
        try {
            return new URL(client.getUrl());
        } catch (MalformedURLException ex) {
            LOGGER.error("Queue URL is malformed");
            // Preserve the original exception as the cause instead of discarding it.
            throw new RuntimeException("Queue URL is malformed", ex);
        }
    }

    /**
     * Creates a new queue.
     *
     * @return An empty response
     * @throws StorageErrorException If a queue with the same name already exists in the queue service.
     */
    public Mono<Void> create() {
        return createWithResponse(null).flatMap(FluxUtil::toMono);
    }

    /**
     * Creates a new queue.
     *
     * @param metadata Metadata to associate with the queue
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If a queue with the same name and different metadata already exists in the queue service.
     */
    public Mono<VoidResponse> createWithResponse(Map<String, String> metadata) {
        return withContext(context -> createWithResponse(metadata, context));
    }

    /**
     * Permanently deletes the queue.
     *
     * @return An empty response
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<Void> delete() {
        return deleteWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Permanently deletes the queue.
     *
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<VoidResponse> deleteWithResponse() {
        return withContext(context -> deleteWithResponse(context));
    }

    Mono<VoidResponse> deleteWithResponse(Context context) {
        return client.queues().deleteWithRestResponseAsync(queueName, context)
            .map(VoidResponse::new);
    }

    /**
     * Retrieves metadata and approximate message count of the queue.
     *
     * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate
     * messages count of the queue.
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<QueueProperties> getProperties() {
        return getPropertiesWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Retrieves metadata and approximate message count of the queue.
     *
     * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate
     * messages count of the queue.
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<Response<QueueProperties>> getPropertiesWithResponse() {
        return withContext(context -> getPropertiesWithResponse(context));
    }

    Mono<Response<QueueProperties>> getPropertiesWithResponse(Context context) {
        // Bug fix: this previously passed Context.NONE, silently dropping the caller-supplied
        // context. Every sibling *WithResponse(Context) method forwards the context argument.
        return client.queues().getPropertiesWithRestResponseAsync(queueName, context)
            .map(this::getQueuePropertiesResponse);
    }

    /**
     * Sets the metadata of the queue.
     *
     * Passing in a {@code null} value for metadata will clear the metadata associated with the queue.
     *
     * @param metadata Metadata to set on the queue
     * @return An empty response
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<Void> setMetadata(Map<String, String> metadata) {
        return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono);
    }

    /**
     * Sets the metadata of the queue.
     *
     * Passing in a {@code null} value for metadata will clear the metadata associated with the queue.
     *
     * @param metadata Metadata to set on the queue
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<VoidResponse> setMetadataWithResponse(Map<String, String> metadata) {
        return withContext(context -> setMetadataWithResponse(metadata, context));
    }

    Mono<VoidResponse> setMetadataWithResponse(Map<String, String> metadata, Context context) {
        return client.queues().setMetadataWithRestResponseAsync(queueName, null, metadata, null, context)
            .map(VoidResponse::new);
    }

    /**
     * Retrieves stored access policies specified on the queue.
     *
     * @return The stored access policies specified on the queue.
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Flux<SignedIdentifier> getAccessPolicy() {
        return client.queues().getAccessPolicyWithRestResponseAsync(queueName, Context.NONE)
            .flatMapMany(response -> Flux.fromIterable(response.value()));
    }

    /**
     * Sets stored access policies on the queue.
     *
     * @param permissions Access policies to set on the queue
     * @return An empty response
     * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have all fields filled out,
     * or the queue will have more than five policies.
     */
    public Mono<Void> setAccessPolicy(List<SignedIdentifier> permissions) {
        return setAccessPolicyWithResponse(permissions).flatMap(FluxUtil::toMono);
    }

    /**
     * Sets stored access policies on the queue.
     *
     * @param permissions Access policies to set on the queue
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have all fields filled out,
     * or the queue will have more than five policies.
     */
    public Mono<VoidResponse> setAccessPolicyWithResponse(List<SignedIdentifier> permissions) {
        return withContext(context -> setAccessPolicyWithResponse(permissions, context));
    }

    Mono<VoidResponse> setAccessPolicyWithResponse(List<SignedIdentifier> permissions, Context context) {
        return client.queues().setAccessPolicyWithRestResponseAsync(queueName, permissions, null, null, context)
            .map(VoidResponse::new);
    }

    /**
     * Deletes all messages in the queue.
     *
     * @return An empty response
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<Void> clearMessages() {
        return clearMessagesWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Deletes all messages in the queue.
     *
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<VoidResponse> clearMessagesWithResponse() {
        return withContext(context -> clearMessagesWithResponse(context));
    }

    Mono<VoidResponse> clearMessagesWithResponse(Context context) {
        return client.messages().clearWithRestResponseAsync(queueName, context)
            .map(VoidResponse::new);
    }

    /**
     * Enqueues a message that has a time-to-live of 7 days and is instantly visible.
     *
     * @param messageText Message text
     * @return A {@link EnqueuedMessage} value that contains metadata about the enqueued message.
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<EnqueuedMessage> enqueueMessage(String messageText) {
        return enqueueMessageWithResponse(messageText, null, null).flatMap(FluxUtil::toMono);
    }

    /**
     * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue.
     *
     * @param messageText Message text
     * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds.
     * If unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0
     * seconds and 7 days.
     * @param timeToLive Optional. How long the message will stay alive in the queue in seconds. If unset the value will
     * default to 7 days, if -1 is passed the message will not expire. The time to live must be -1 or any positive number.
     * @return A response containing the {@link EnqueuedMessage} metadata.
     * @throws StorageErrorException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive}
     * are outside of the allowed limits.
     */
    public Mono<Response<EnqueuedMessage>> enqueueMessageWithResponse(String messageText, Duration visibilityTimeout,
                                                                     Duration timeToLive) {
        return withContext(context -> enqueueMessageWithResponse(messageText, visibilityTimeout, timeToLive, context));
    }

    Mono<Response<EnqueuedMessage>> enqueueMessageWithResponse(String messageText, Duration visibilityTimeout,
                                                               Duration timeToLive, Context context) {
        Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds();
        Integer timeToLiveInSeconds = (timeToLive == null) ? null : (int) timeToLive.getSeconds();
        QueueMessage message = new QueueMessage().messageText(messageText);
        return client.messages()
            .enqueueWithRestResponseAsync(queueName, message, visibilityTimeoutInSeconds, timeToLiveInSeconds,
                null, null, context)
            // The service returns a single-element list for a single enqueue request.
            .map(response -> new SimpleResponse<>(response, response.value().get(0)));
    }

    /**
     * Retrieves the first message in the queue and hides it from other operations for 30 seconds.
     *
     * @return The first {@link DequeuedMessage} in the queue and other metadata about the message.
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Flux<DequeuedMessage> dequeueMessages() {
        return dequeueMessages(1, Duration.ofSeconds(30));
    }

    /**
     * Retrieves up to the maximum number of messages from the queue and hides them from other operations for 30 seconds.
     *
     * @param maxMessages Optional. Maximum number of messages to get, the allowed range is 1 to 32 messages; if left
     * empty only 1 message will be retrieved.
     * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue.
     * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds
     */
    public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages) {
        return dequeueMessages(maxMessages, Duration.ofSeconds(30));
    }

    /**
     * Retrieves up to the maximum number of messages from the queue and hides them from other operations for the
     * timeout period.
     *
     * @param maxMessages Optional. Maximum number of messages to get, the allowed range is 1 to 32 messages; if left
     * empty only 1 message will be retrieved.
     * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds.
     * If left empty the dequeued messages will be invisible for 30 seconds. The timeout must be between 1 second and 7 days.
     * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue.
     * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} or {@code visibilityTimeout} is
     * outside of the allowed bounds
     */
    public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages, Duration visibilityTimeout) {
        Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds();
        return client.messages()
            .dequeueWithRestResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null, Context.NONE)
            .flatMapMany(response -> Flux.fromIterable(response.value()));
    }

    /**
     * Peeks the first message in the queue.
     *
     * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide
     * messages from other operations on the queue.
     *
     * @return A {@link PeekedMessage} that contains metadata about the message.
     */
    public Flux<PeekedMessage> peekMessages() {
        return peekMessages(null);
    }

    /**
     * Peek messages from the front of the queue up to the maximum number of messages.
     *
     * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide
     * messages from other operations on the queue.
     *
     * @param maxMessages Optional. Maximum number of messages to peek, the allowed range is 1 to 32 messages; if left
     * empty only 1 message will be peeked.
     * @return Up to {@code maxMessages} {@link PeekedMessage PeekedMessages} from the queue.
     * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds
     */
    public Flux<PeekedMessage> peekMessages(Integer maxMessages) {
        return client.messages().peekWithRestResponseAsync(queueName, maxMessages, null, null, Context.NONE)
            .flatMapMany(response -> Flux.fromIterable(response.value()));
    }

    /**
     * Updates the specific message in the queue with a new message and resets the visibility timeout.
     *
     * @param messageText Updated value for the message
     * @param messageId Id of the message to update
     * @param popReceipt Unique identifier that must match for the message to be updated
     * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The
     * timeout period must be between 1 second and 7 days.
     * @return A {@link UpdatedMessage} that contains the updated metadata about the message.
     * @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message,
     * or the {@code visibilityTimeout} is outside the allowed bounds
     */
    public Mono<UpdatedMessage> updateMessage(String messageText, String messageId, String popReceipt,
                                              Duration visibilityTimeout) {
        return updateMessageWithResponse(messageText, messageId, popReceipt, visibilityTimeout)
            .flatMap(FluxUtil::toMono);
    }

    /**
     * Updates the specific message in the queue with a new message and resets the visibility timeout.
     *
     * @param messageText Updated value for the message
     * @param messageId Id of the message to update
     * @param popReceipt Unique identifier that must match for the message to be updated
     * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The
     * timeout period must be between 1 second and 7 days.
     * @return A response containing the {@link UpdatedMessage} metadata.
     * @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message,
     * or the {@code visibilityTimeout} is outside the allowed bounds
     */
    public Mono<Response<UpdatedMessage>> updateMessageWithResponse(String messageText, String messageId,
                                                                   String popReceipt, Duration visibilityTimeout) {
        return withContext(context ->
            updateMessageWithResponse(messageText, messageId, popReceipt, visibilityTimeout, context));
    }

    Mono<Response<UpdatedMessage>> updateMessageWithResponse(String messageText, String messageId, String popReceipt,
                                                             Duration visibilityTimeout, Context context) {
        // Fail fast with a clear message rather than an anonymous NPE on getSeconds();
        // visibilityTimeout is a required parameter for updates.
        Objects.requireNonNull(visibilityTimeout, "visibilityTimeout");
        QueueMessage message = new QueueMessage().messageText(messageText);
        return client.messageIds()
            .updateWithRestResponseAsync(queueName, messageId, message, popReceipt,
                (int) visibilityTimeout.getSeconds(), context)
            .map(this::getUpdatedMessageResponse);
    }

    /**
     * Deletes the specified message in the queue
     *
     * @param messageId Id of the message to deleted
     * @param popReceipt Unique identifier that must match for the message to be deleted
     * @return An empty response
     * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message
     */
    public Mono<Void> deleteMessage(String messageId, String popReceipt) {
        return deleteMessageWithResponse(messageId, popReceipt).flatMap(FluxUtil::toMono);
    }

    /**
     * Deletes the specified message in the queue
     *
     * @param messageId Id of the message to deleted
     * @param popReceipt Unique identifier that must match for the message to be deleted
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message
     */
    public Mono<VoidResponse> deleteMessageWithResponse(String messageId, String popReceipt) {
        return withContext(context -> deleteMessageWithResponse(messageId, popReceipt, context));
    }

    Mono<VoidResponse> deleteMessageWithResponse(String messageId, String popReceipt, Context context) {
        return client.messageIds().deleteWithRestResponseAsync(queueName, messageId, popReceipt, context)
            .map(VoidResponse::new);
    }

    /*
     * Maps the HTTP headers returned from the service to the expected response type
     * @param response Service response
     * @return Mapped response
     */
    private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) {
        QueueGetPropertiesHeaders propertiesHeaders = response.deserializedHeaders();
        QueueProperties properties = new QueueProperties(propertiesHeaders.metadata(),
            propertiesHeaders.approximateMessagesCount());
        return new SimpleResponse<>(response, properties);
    }

    /*
     * Maps the HTTP headers returned from the service to the expected response type
     * @param response Service response
     * @return Mapped response
     */
    private Response<UpdatedMessage> getUpdatedMessageResponse(MessageIdsUpdateResponse response) {
        MessageIdUpdateHeaders headers = response.deserializedHeaders();
        UpdatedMessage updatedMessage = new UpdatedMessage(headers.popReceipt(), headers.timeNextVisible());
        return new SimpleResponse<>(response, updatedMessage);
    }
}
This is called from the sync client methods, which need to pass the context argument explicitly, so it is kept non-public.
/**
 * Creates the queue, associating the supplied metadata with it.
 *
 * @param metadata Metadata to associate with the queue; {@code null} creates the queue without metadata
 * @param context Additional context that is passed through the HTTP pipeline during the service call
 * @return A response that only contains headers and response status code
 */
Mono<VoidResponse> createWithResponse(Map<String, String> metadata, Context context) {
    return client.queues()
        .createWithRestResponseAsync(queueName, null, metadata, null, context)
        .map(VoidResponse::new);
}
}
/**
 * Issues the queue-creation call through the service client, tagging the request
 * with the given pipeline {@code context}.
 *
 * @param metadata Optional metadata to store on the queue
 * @param context Pipeline context for the service call
 * @return A response carrying only headers and the status code
 */
Mono<VoidResponse> createWithResponse(Map<String, String> metadata, Context context) {
    return client.queues()
        .createWithRestResponseAsync(queueName, null, metadata, null, context)
        .map(VoidResponse::new);
}
/**
 * Async client for interacting with a specific queue in Azure Queue Storage. Provides queue
 * management operations (create, delete, metadata, access policies) and message operations
 * (enqueue, dequeue, peek, update, delete, clear). All operations are lazy: nothing is sent
 * to the service until the returned {@link Mono} or {@link Flux} is subscribed to.
 */
class QueueAsyncClient {
    private static final ClientLogger LOGGER = new ClientLogger(QueueAsyncClient.class);
    private final AzureQueueStorageImpl client;
    private final String queueName;

    /**
     * Creates a QueueAsyncClient that reuses the pipeline, URL, and version of an existing
     * {@code AzureQueueStorageImpl} client.
     *
     * @param client Client that interacts with the service interfaces
     * @param queueName Name of the queue
     */
    QueueAsyncClient(AzureQueueStorageImpl client, String queueName) {
        // Fail fast on a null queue name instead of deferring an NPE to the first service call.
        this.queueName = java.util.Objects.requireNonNull(queueName, "'queueName' cannot be null.");
        this.client = new AzureQueueStorageBuilder().pipeline(client.getHttpPipeline())
            .url(client.getUrl())
            .version(client.getVersion())
            .build();
    }

    /**
     * Creates a QueueAsyncClient that sends requests to the storage queue service at
     * {@code endpoint} through {@code httpPipeline}.
     *
     * @param endpoint URL for the Storage Queue service
     * @param httpPipeline HttpPipeline that the HTTP requests and responses flow through
     * @param queueName Name of the queue
     */
    QueueAsyncClient(URL endpoint, HttpPipeline httpPipeline, String queueName) {
        this.queueName = java.util.Objects.requireNonNull(queueName, "'queueName' cannot be null.");
        this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline)
            .url(endpoint.toString())
            .build();
    }

    /**
     * @return the URL of the storage queue
     * @throws RuntimeException If the queue is using a malformed URL.
     */
    public URL getQueueUrl() {
        try {
            return new URL(client.getUrl());
        } catch (MalformedURLException ex) {
            LOGGER.error("Queue URL is malformed");
            // Attach the original cause so the parse failure remains diagnosable.
            throw new RuntimeException("Queue URL is malformed", ex);
        }
    }

    /**
     * Creates a new queue.
     *
     * @return An empty response
     * @throws StorageErrorException If a queue with the same name already exists in the queue service.
     */
    public Mono<Void> create() {
        return createWithResponse(null).flatMap(FluxUtil::toMono);
    }

    /**
     * Creates a new queue with the given metadata.
     *
     * @param metadata Metadata to associate with the queue; may be {@code null}
     * @return A response that only contains headers and the response status code
     * @throws StorageErrorException If a queue with the same name and different metadata already
     * exists in the queue service.
     */
    public Mono<VoidResponse> createWithResponse(Map<String, String> metadata) {
        return withContext(context -> createWithResponse(metadata, context));
    }

    /*
     * Package-private overload invoked from the sync client methods, which must pass the
     * Context explicitly. (Restored: the public overload above delegates to it.)
     */
    Mono<VoidResponse> createWithResponse(Map<String, String> metadata, Context context) {
        return client.queues().createWithRestResponseAsync(queueName, null, metadata, null, context)
            .map(VoidResponse::new);
    }

    /**
     * Permanently deletes the queue.
     *
     * @return An empty response
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<Void> delete() {
        return deleteWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Permanently deletes the queue.
     *
     * @return A response that only contains headers and the response status code
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<VoidResponse> deleteWithResponse() {
        return withContext(context -> deleteWithResponse(context));
    }

    Mono<VoidResponse> deleteWithResponse(Context context) {
        return client.queues().deleteWithRestResponseAsync(queueName, context)
            .map(VoidResponse::new);
    }

    /**
     * Retrieves the metadata and approximate message count of the queue.
     *
     * @return A {@link QueueProperties} value containing the metadata and approximate message count
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<QueueProperties> getProperties() {
        return getPropertiesWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Retrieves the metadata and approximate message count of the queue.
     *
     * @return A response containing a {@link QueueProperties} value with the metadata and
     * approximate message count of the queue
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<Response<QueueProperties>> getPropertiesWithResponse() {
        return withContext(context -> getPropertiesWithResponse(context));
    }

    Mono<Response<QueueProperties>> getPropertiesWithResponse(Context context) {
        // BUGFIX: previously passed Context.NONE, silently dropping the caller's context.
        return client.queues().getPropertiesWithRestResponseAsync(queueName, context)
            .map(this::getQueuePropertiesResponse);
    }

    /**
     * Sets the metadata of the queue. Passing {@code null} clears the metadata associated
     * with the queue.
     *
     * @param metadata Metadata to set on the queue
     * @return An empty response
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<Void> setMetadata(Map<String, String> metadata) {
        return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono);
    }

    /**
     * Sets the metadata of the queue. Passing {@code null} clears the metadata associated
     * with the queue.
     *
     * @param metadata Metadata to set on the queue
     * @return A response that only contains headers and the response status code
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<VoidResponse> setMetadataWithResponse(Map<String, String> metadata) {
        return withContext(context -> setMetadataWithResponse(metadata, context));
    }

    Mono<VoidResponse> setMetadataWithResponse(Map<String, String> metadata, Context context) {
        return client.queues().setMetadataWithRestResponseAsync(queueName, null, metadata, null, context)
            .map(VoidResponse::new);
    }

    /**
     * Retrieves the stored access policies specified on the queue.
     *
     * @return The stored access policies specified on the queue
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Flux<SignedIdentifier> getAccessPolicy() {
        // NOTE(review): does not propagate a caller Context (no context-taking overload exists).
        return client.queues().getAccessPolicyWithRestResponseAsync(queueName, Context.NONE)
            .flatMapMany(response -> Flux.fromIterable(response.value()));
    }

    /**
     * Sets stored access policies on the queue.
     *
     * @param permissions Access policies to set on the queue
     * @return An empty response
     * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't
     * have all fields filled out, or the queue will have more than five policies.
     */
    public Mono<Void> setAccessPolicy(List<SignedIdentifier> permissions) {
        return setAccessPolicyWithResponse(permissions).flatMap(FluxUtil::toMono);
    }

    /**
     * Sets stored access policies on the queue.
     *
     * @param permissions Access policies to set on the queue
     * @return A response that only contains headers and the response status code
     * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't
     * have all fields filled out, or the queue will have more than five policies.
     */
    public Mono<VoidResponse> setAccessPolicyWithResponse(List<SignedIdentifier> permissions) {
        return withContext(context -> setAccessPolicyWithResponse(permissions, context));
    }

    Mono<VoidResponse> setAccessPolicyWithResponse(List<SignedIdentifier> permissions, Context context) {
        return client.queues().setAccessPolicyWithRestResponseAsync(queueName, permissions, null, null, context)
            .map(VoidResponse::new);
    }

    /**
     * Deletes all messages in the queue.
     *
     * @return An empty response
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<Void> clearMessages() {
        return clearMessagesWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Deletes all messages in the queue.
     *
     * @return A response that only contains headers and the response status code
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<VoidResponse> clearMessagesWithResponse() {
        return withContext(context -> clearMessagesWithResponse(context));
    }

    Mono<VoidResponse> clearMessagesWithResponse(Context context) {
        return client.messages().clearWithRestResponseAsync(queueName, context)
            .map(VoidResponse::new);
    }

    /**
     * Enqueues a message that has a time-to-live of 7 days and is instantly visible.
     *
     * @param messageText Message text
     * @return An {@link EnqueuedMessage} value with metadata about the enqueued message
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<EnqueuedMessage> enqueueMessage(String messageText) {
        return enqueueMessageWithResponse(messageText, null, null).flatMap(FluxUtil::toMono);
    }

    /**
     * Enqueues a message with a given time-to-live and a period during which the message is
     * invisible in the queue.
     *
     * @param messageText Message text
     * @param visibilityTimeout Optional. How long the message stays invisible in the queue;
     * defaults to 0 (instantly visible). Must be between 0 seconds and 7 days.
     * @param timeToLive Optional. How long the message stays alive in the queue; defaults to
     * 7 days, and -1 means the message never expires. Must be -1 or any positive duration.
     * @return A response containing the {@link EnqueuedMessage} metadata
     * @throws StorageErrorException If the queue doesn't exist or {@code visibilityTimeout} or
     * {@code timeToLive} are outside the allowed limits
     */
    public Mono<Response<EnqueuedMessage>> enqueueMessageWithResponse(String messageText, Duration visibilityTimeout, Duration timeToLive) {
        return withContext(context -> enqueueMessageWithResponse(messageText, visibilityTimeout, timeToLive, context));
    }

    Mono<Response<EnqueuedMessage>> enqueueMessageWithResponse(String messageText, Duration visibilityTimeout, Duration timeToLive, Context context) {
        // null Durations mean "use the service defaults"; otherwise convert to whole seconds.
        Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds();
        Integer timeToLiveInSeconds = (timeToLive == null) ? null : (int) timeToLive.getSeconds();
        QueueMessage message = new QueueMessage().messageText(messageText);
        return client.messages().enqueueWithRestResponseAsync(queueName, message, visibilityTimeoutInSeconds, timeToLiveInSeconds, null, null, context)
            .map(response -> new SimpleResponse<>(response, response.value().get(0)));
    }

    /**
     * Retrieves the first message in the queue and hides it from other operations for 30 seconds.
     *
     * @return The first {@link DequeuedMessage} in the queue
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Flux<DequeuedMessage> dequeueMessages() {
        return dequeueMessages(1, Duration.ofSeconds(30));
    }

    /**
     * Retrieves up to the maximum number of messages from the queue and hides them from other
     * operations for 30 seconds.
     *
     * @param maxMessages Optional. Maximum number of messages to get; if fewer messages exist,
     * all of them are returned. Defaults to 1; the allowed range is 1 to 32.
     * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue
     * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside
     * the allowed bounds
     */
    public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages) {
        return dequeueMessages(maxMessages, Duration.ofSeconds(30));
    }

    /**
     * Retrieves up to the maximum number of messages from the queue and hides them from other
     * operations for the given timeout period.
     *
     * @param maxMessages Optional. Maximum number of messages to get; defaults to 1, allowed
     * range 1 to 32.
     * @param visibilityTimeout Optional. How long the dequeued messages stay invisible; defaults
     * to 30 seconds. Must be between 1 second and 7 days.
     * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue
     * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} or
     * {@code visibilityTimeout} is outside the allowed bounds
     */
    public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages, Duration visibilityTimeout) {
        Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds();
        // NOTE(review): does not propagate a caller Context (no context-taking overload exists).
        return client.messages().dequeueWithRestResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null, Context.NONE)
            .flatMapMany(response -> Flux.fromIterable(response.value()));
    }

    /**
     * Peeks the first message in the queue. Peeked messages do not contain the information
     * needed to interact with the message, nor do they hide messages from other operations.
     *
     * @return A {@link PeekedMessage} that contains metadata about the message
     */
    public Flux<PeekedMessage> peekMessages() {
        return peekMessages(null);
    }

    /**
     * Peeks messages from the front of the queue, up to the maximum number of messages.
     *
     * @param maxMessages Optional. Maximum number of messages to peek; defaults to 1, allowed
     * range 1 to 32.
     * @return Up to {@code maxMessages} {@link PeekedMessage PeekedMessages} from the queue
     * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside
     * the allowed bounds
     */
    public Flux<PeekedMessage> peekMessages(Integer maxMessages) {
        return client.messages().peekWithRestResponseAsync(queueName, maxMessages, null, null, Context.NONE)
            .flatMapMany(response -> Flux.fromIterable(response.value()));
    }

    /**
     * Updates the specific message in the queue with a new message and resets the visibility
     * timeout.
     *
     * @param messageText Updated value for the message
     * @param messageId Id of the message to update
     * @param popReceipt Unique identifier that must match for the message to be updated
     * @param visibilityTimeout How long the message stays invisible in the queue; must be
     * between 1 second and 7 days
     * @return An {@link UpdatedMessage} containing the updated metadata about the message
     * @throws StorageErrorException If the queue or messageId don't exist, the popReceipt
     * doesn't match on the message, or {@code visibilityTimeout} is outside the allowed bounds
     */
    public Mono<UpdatedMessage> updateMessage(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) {
        return updateMessageWithResponse(messageText, messageId, popReceipt, visibilityTimeout).flatMap(FluxUtil::toMono);
    }

    /**
     * Updates the specific message in the queue with a new message and resets the visibility
     * timeout.
     *
     * @param messageText Updated value for the message
     * @param messageId Id of the message to update
     * @param popReceipt Unique identifier that must match for the message to be updated
     * @param visibilityTimeout How long the message stays invisible in the queue; must be
     * between 1 second and 7 days
     * @return A response containing the {@link UpdatedMessage} metadata
     * @throws StorageErrorException If the queue or messageId don't exist, the popReceipt
     * doesn't match on the message, or {@code visibilityTimeout} is outside the allowed bounds
     */
    public Mono<Response<UpdatedMessage>> updateMessageWithResponse(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) {
        return withContext(context -> updateMessageWithResponse(messageText, messageId, popReceipt, visibilityTimeout, context));
    }

    Mono<Response<UpdatedMessage>> updateMessageWithResponse(String messageText, String messageId, String popReceipt, Duration visibilityTimeout, Context context) {
        QueueMessage message = new QueueMessage().messageText(messageText);
        // visibilityTimeout is required here (a null value throws NPE before the service call).
        return client.messageIds().updateWithRestResponseAsync(queueName, messageId, message, popReceipt, (int) visibilityTimeout.getSeconds(), context)
            .map(this::getUpdatedMessageResponse);
    }

    /**
     * Deletes the specified message in the queue.
     *
     * @param messageId Id of the message to delete
     * @param popReceipt Unique identifier that must match for the message to be deleted
     * @return An empty response
     * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt
     * doesn't match on the message
     */
    public Mono<Void> deleteMessage(String messageId, String popReceipt) {
        return deleteMessageWithResponse(messageId, popReceipt).flatMap(FluxUtil::toMono);
    }

    /**
     * Deletes the specified message in the queue.
     *
     * @param messageId Id of the message to delete
     * @param popReceipt Unique identifier that must match for the message to be deleted
     * @return A response that only contains headers and the response status code
     * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt
     * doesn't match on the message
     */
    public Mono<VoidResponse> deleteMessageWithResponse(String messageId, String popReceipt) {
        return withContext(context -> deleteMessageWithResponse(messageId, popReceipt, context));
    }

    Mono<VoidResponse> deleteMessageWithResponse(String messageId, String popReceipt, Context context) {
        return client.messageIds().deleteWithRestResponseAsync(queueName, messageId, popReceipt, context)
            .map(VoidResponse::new);
    }

    /*
     * Maps the HTTP headers returned from the service to the expected response type.
     * @param response Service response
     * @return Mapped response
     */
    private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) {
        QueueGetPropertiesHeaders propertiesHeaders = response.deserializedHeaders();
        QueueProperties properties = new QueueProperties(propertiesHeaders.metadata(), propertiesHeaders.approximateMessagesCount());
        return new SimpleResponse<>(response, properties);
    }

    /*
     * Maps the HTTP headers returned from the service to the expected response type.
     * @param response Service response
     * @return Mapped response
     */
    private Response<UpdatedMessage> getUpdatedMessageResponse(MessageIdsUpdateResponse response) {
        MessageIdUpdateHeaders headers = response.deserializedHeaders();
        UpdatedMessage updatedMessage = new UpdatedMessage(headers.popReceipt(), headers.timeNextVisible());
        return new SimpleResponse<>(response, updatedMessage);
    }
}
class QueueAsyncClient {
    private static final ClientLogger LOGGER = new ClientLogger(QueueAsyncClient.class);
    private final AzureQueueStorageImpl client;
    private final String queueName;

    /**
     * Creates a QueueAsyncClient that sends requests to the storage queue service through the
     * {@link HttpPipeline pipeline} of the passed {@code AzureQueueStorageImpl} client.
     *
     * @param client Client that interacts with the service interfaces
     * @param queueName Name of the queue
     */
    QueueAsyncClient(AzureQueueStorageImpl client, String queueName) {
        Objects.requireNonNull(queueName);
        this.queueName = queueName;
        this.client = new AzureQueueStorageBuilder().pipeline(client.getHttpPipeline())
            .url(client.getUrl())
            .version(client.getVersion())
            .build();
    }

    /**
     * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code endpoint}.
     * Each service call goes through the {@code httpPipeline}.
     *
     * @param endpoint URL for the Storage Queue service
     * @param httpPipeline HttpPipeline that the HTTP requests and responses flow through
     * @param queueName Name of the queue
     */
    QueueAsyncClient(URL endpoint, HttpPipeline httpPipeline, String queueName) {
        Objects.requireNonNull(queueName);
        this.queueName = queueName;
        this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline)
            .url(endpoint.toString())
            .build();
    }

    /**
     * @return the URL of the storage queue
     * @throws RuntimeException If the queue is using a malformed URL.
     */
    public URL getQueueUrl() {
        try {
            return new URL(client.getUrl());
        } catch (MalformedURLException ex) {
            LOGGER.error("Queue URL is malformed");
            // Preserve the original exception as the cause instead of discarding it.
            throw new RuntimeException("Queue URL is malformed", ex);
        }
    }

    /**
     * Creates a new queue.
     *
     * @return An empty response
     * @throws StorageErrorException If a queue with the same name already exists in the queue service.
     */
    public Mono<Void> create() {
        return createWithResponse(null).flatMap(FluxUtil::toMono);
    }

    /**
     * Creates a new queue with the given metadata.
     *
     * @param metadata Metadata to associate with the queue
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If a queue with the same name and different metadata already exists
     * in the queue service.
     */
    public Mono<VoidResponse> createWithResponse(Map<String, String> metadata) {
        return withContext(context -> createWithResponse(metadata, context));
    }

    // NOTE(review): this package-private overload was referenced but missing from the class body;
    // restored here. Confirm the generated createWithRestResponseAsync parameter order against
    // the AzureQueueStorageImpl client.
    Mono<VoidResponse> createWithResponse(Map<String, String> metadata, Context context) {
        return client.queues().createWithRestResponseAsync(queueName, null, metadata, null, context)
            .map(VoidResponse::new);
    }

    /**
     * Permanently deletes the queue.
     *
     * @return An empty response
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<Void> delete() {
        return deleteWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Permanently deletes the queue.
     *
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<VoidResponse> deleteWithResponse() {
        return withContext(context -> deleteWithResponse(context));
    }

    Mono<VoidResponse> deleteWithResponse(Context context) {
        return client.queues().deleteWithRestResponseAsync(queueName, context)
            .map(VoidResponse::new);
    }

    /**
     * Retrieves metadata and approximate message count of the queue.
     *
     * @return A response containing a {@link QueueProperties} value which contains the metadata and
     * approximate messages count of the queue.
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<QueueProperties> getProperties() {
        return getPropertiesWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Retrieves metadata and approximate message count of the queue.
     *
     * @return A response containing a {@link QueueProperties} value which contains the metadata and
     * approximate messages count of the queue.
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<Response<QueueProperties>> getPropertiesWithResponse() {
        return withContext(context -> getPropertiesWithResponse(context));
    }

    Mono<Response<QueueProperties>> getPropertiesWithResponse(Context context) {
        // BUGFIX: previously passed Context.NONE, silently dropping the caller's context.
        return client.queues().getPropertiesWithRestResponseAsync(queueName, context)
            .map(this::getQueuePropertiesResponse);
    }

    /**
     * Sets the metadata of the queue.
     *
     * Passing in a {@code null} value for metadata will clear the metadata associated with the queue.
     *
     * @param metadata Metadata to set on the queue
     * @return An empty response
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<Void> setMetadata(Map<String, String> metadata) {
        return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono);
    }

    /**
     * Sets the metadata of the queue.
     *
     * Passing in a {@code null} value for metadata will clear the metadata associated with the queue.
     *
     * @param metadata Metadata to set on the queue
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<VoidResponse> setMetadataWithResponse(Map<String, String> metadata) {
        return withContext(context -> setMetadataWithResponse(metadata, context));
    }

    Mono<VoidResponse> setMetadataWithResponse(Map<String, String> metadata, Context context) {
        return client.queues().setMetadataWithRestResponseAsync(queueName, null, metadata, null, context)
            .map(VoidResponse::new);
    }

    /**
     * Retrieves stored access policies specified on the queue.
     *
     * @return The stored access policies specified on the queue.
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Flux<SignedIdentifier> getAccessPolicy() {
        return client.queues().getAccessPolicyWithRestResponseAsync(queueName, Context.NONE)
            .flatMapMany(response -> Flux.fromIterable(response.value()));
    }

    /**
     * Sets stored access policies on the queue.
     *
     * @param permissions Access policies to set on the queue
     * @return An empty response
     * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have
     * all fields filled out, or the queue will have more than five policies.
     */
    public Mono<Void> setAccessPolicy(List<SignedIdentifier> permissions) {
        return setAccessPolicyWithResponse(permissions).flatMap(FluxUtil::toMono);
    }

    /**
     * Sets stored access policies on the queue.
     *
     * @param permissions Access policies to set on the queue
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have
     * all fields filled out, or the queue will have more than five policies.
     */
    public Mono<VoidResponse> setAccessPolicyWithResponse(List<SignedIdentifier> permissions) {
        return withContext(context -> setAccessPolicyWithResponse(permissions, context));
    }

    Mono<VoidResponse> setAccessPolicyWithResponse(List<SignedIdentifier> permissions, Context context) {
        return client.queues().setAccessPolicyWithRestResponseAsync(queueName, permissions, null, null, context)
            .map(VoidResponse::new);
    }

    /**
     * Deletes all messages in the queue.
     *
     * @return An empty response
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<Void> clearMessages() {
        return clearMessagesWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Deletes all messages in the queue.
     *
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<VoidResponse> clearMessagesWithResponse() {
        return withContext(context -> clearMessagesWithResponse(context));
    }

    Mono<VoidResponse> clearMessagesWithResponse(Context context) {
        return client.messages().clearWithRestResponseAsync(queueName, context)
            .map(VoidResponse::new);
    }

    /**
     * Enqueues a message that has a time-to-live of 7 days and is instantly visible.
     *
     * @param messageText Message text
     * @return An {@link EnqueuedMessage} value that contains information about the enqueued message.
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<EnqueuedMessage> enqueueMessage(String messageText) {
        return enqueueMessageWithResponse(messageText, null, null).flatMap(FluxUtil::toMono);
    }

    /**
     * Enqueues a message with a given time-to-live and a timeout period where the message is invisible
     * in the queue.
     *
     * @param messageText Message text
     * @param visibilityTimeout Optional. How long the message is invisible in the queue. If unset the
     * value will default to 0 and the message will be instantly visible. Must be between 0 seconds
     * and 7 days.
     * @param timeToLive Optional. How long the message will stay alive in the queue. If unset the value
     * will default to 7 days; if -1 is passed the message will not expire. Must be -1 or any positive
     * number of seconds.
     * @return An {@link EnqueuedMessage} value that contains information about the enqueued message.
     * @throws StorageErrorException If the queue doesn't exist or the {@code visibilityTimeout} or
     * {@code timeToLive} are outside of the allowed limits.
     */
    public Mono<Response<EnqueuedMessage>> enqueueMessageWithResponse(String messageText, Duration visibilityTimeout, Duration timeToLive) {
        return withContext(context -> enqueueMessageWithResponse(messageText, visibilityTimeout, timeToLive, context));
    }

    Mono<Response<EnqueuedMessage>> enqueueMessageWithResponse(String messageText, Duration visibilityTimeout, Duration timeToLive, Context context) {
        // The service expects whole seconds; null means "use the service default".
        Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds();
        Integer timeToLiveInSeconds = (timeToLive == null) ? null : (int) timeToLive.getSeconds();
        QueueMessage message = new QueueMessage().messageText(messageText);

        return client.messages().enqueueWithRestResponseAsync(queueName, message, visibilityTimeoutInSeconds, timeToLiveInSeconds, null, null, context)
            .map(response -> new SimpleResponse<>(response, response.value().get(0)));
    }

    /**
     * Retrieves the first message in the queue and hides it from other operations for 30 seconds.
     *
     * @return The first {@link DequeuedMessage} in the queue; contains the information needed to
     * interact with the message plus other metadata about it.
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Flux<DequeuedMessage> dequeueMessages() {
        return dequeueMessages(1, Duration.ofSeconds(30));
    }

    /**
     * Retrieves up to the maximum number of messages from the queue and hides them from other
     * operations for 30 seconds.
     *
     * @param maxMessages Optional. Maximum number of messages to get; if fewer messages exist in the
     * queue all of them will be returned. If left empty only 1 message is retrieved; the allowed range
     * is 1 to 32 messages.
     * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue.
     * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of
     * the allowed bounds
     */
    public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages) {
        return dequeueMessages(maxMessages, Duration.ofSeconds(30));
    }

    /**
     * Retrieves up to the maximum number of messages from the queue and hides them from other
     * operations for the timeout period.
     *
     * @param maxMessages Optional. Maximum number of messages to get; the allowed range is 1 to 32.
     * @param visibilityTimeout Optional. How long the messages are invisible in the queue. If left
     * empty the dequeued messages will be invisible for 30 seconds. Must be between 1 second and 7 days.
     * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue.
     * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} or
     * {@code visibilityTimeout} is outside of the allowed bounds
     */
    public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages, Duration visibilityTimeout) {
        Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds();
        return client.messages().dequeueWithRestResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null, Context.NONE)
            .flatMapMany(response -> Flux.fromIterable(response.value()));
    }

    /**
     * Peeks the first message in the queue.
     *
     * Peeked messages don't contain the information needed to interact with the message nor do they
     * hide messages from other operations on the queue.
     *
     * @return A {@link PeekedMessage} that contains metadata about the message.
     */
    public Flux<PeekedMessage> peekMessages() {
        return peekMessages(null);
    }

    /**
     * Peek messages from the front of the queue up to the maximum number of messages.
     *
     * Peeked messages don't contain the information needed to interact with the message nor do they
     * hide messages from other operations on the queue.
     *
     * @param maxMessages Optional. Maximum number of messages to peek; the allowed range is 1 to 32.
     * If left empty only 1 message will be peeked.
     * @return Up to {@code maxMessages} {@link PeekedMessage PeekedMessages} from the queue.
     * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of
     * the allowed bounds
     */
    public Flux<PeekedMessage> peekMessages(Integer maxMessages) {
        return client.messages().peekWithRestResponseAsync(queueName, maxMessages, null, null, Context.NONE)
            .flatMapMany(response -> Flux.fromIterable(response.value()));
    }

    /**
     * Updates the specific message in the queue with a new message and resets the visibility timeout.
     *
     * @param messageText Updated value for the message
     * @param messageId Id of the message to update
     * @param popReceipt Unique identifier that must match for the message to be updated
     * @param visibilityTimeout How long the message is invisible in the queue; must be between
     * 1 second and 7 days.
     * @return An {@link UpdatedMessage} that contains the updated metadata about the message.
     * @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't
     * match on the message, or the {@code visibilityTimeout} is outside the allowed bounds
     */
    public Mono<UpdatedMessage> updateMessage(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) {
        return updateMessageWithResponse(messageText, messageId, popReceipt, visibilityTimeout).flatMap(FluxUtil::toMono);
    }

    /**
     * Updates the specific message in the queue with a new message and resets the visibility timeout.
     *
     * @param messageText Updated value for the message
     * @param messageId Id of the message to update
     * @param popReceipt Unique identifier that must match for the message to be updated
     * @param visibilityTimeout How long the message is invisible in the queue; must be between
     * 1 second and 7 days.
     * @return An {@link UpdatedMessage} that contains the updated metadata about the message.
     * @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't
     * match on the message, or the {@code visibilityTimeout} is outside the allowed bounds
     */
    public Mono<Response<UpdatedMessage>> updateMessageWithResponse(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) {
        return withContext(context -> updateMessageWithResponse(messageText, messageId, popReceipt, visibilityTimeout, context));
    }

    Mono<Response<UpdatedMessage>> updateMessageWithResponse(String messageText, String messageId, String popReceipt, Duration visibilityTimeout, Context context) {
        QueueMessage message = new QueueMessage().messageText(messageText);
        return client.messageIds().updateWithRestResponseAsync(queueName, messageId, message, popReceipt, (int) visibilityTimeout.getSeconds(), context)
            .map(this::getUpdatedMessageResponse);
    }

    /**
     * Deletes the specified message in the queue.
     *
     * @param messageId Id of the message to deleted
     * @param popReceipt Unique identifier that must match for the message to be deleted
     * @return An empty response
     * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't
     * match on the message
     */
    public Mono<Void> deleteMessage(String messageId, String popReceipt) {
        return deleteMessageWithResponse(messageId, popReceipt).flatMap(FluxUtil::toMono);
    }

    /**
     * Deletes the specified message in the queue.
     *
     * @param messageId Id of the message to deleted
     * @param popReceipt Unique identifier that must match for the message to be deleted
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't
     * match on the message
     */
    public Mono<VoidResponse> deleteMessageWithResponse(String messageId, String popReceipt) {
        return withContext(context -> deleteMessageWithResponse(messageId, popReceipt, context));
    }

    Mono<VoidResponse> deleteMessageWithResponse(String messageId, String popReceipt, Context context) {
        return client.messageIds().deleteWithRestResponseAsync(queueName, messageId, popReceipt, context)
            .map(VoidResponse::new);
    }

    /*
     * Maps the HTTP headers returned from the service to the expected response type
     * @param response Service response
     * @return Mapped response
     */
    private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) {
        QueueGetPropertiesHeaders propertiesHeaders = response.deserializedHeaders();
        QueueProperties properties = new QueueProperties(propertiesHeaders.metadata(), propertiesHeaders.approximateMessagesCount());
        return new SimpleResponse<>(response, properties);
    }

    /*
     * Maps the HTTP headers returned from the service to the expected response type
     * @param response Service response
     * @return Mapped response
     */
    private Response<UpdatedMessage> getUpdatedMessageResponse(MessageIdsUpdateResponse response) {
        MessageIdUpdateHeaders headers = response.deserializedHeaders();
        UpdatedMessage updatedMessage = new UpdatedMessage(headers.popReceipt(), headers.timeNextVisible());
        return new SimpleResponse<>(response, updatedMessage);
    }
}
It seems `buildAuthenticatedRequest` can throw an exception; if it does, `response.close();` is never reached, and that can leak the underlying connection.
/**
 * Installs an OkHttp interceptor that authenticates outgoing requests. When no challenge is
 * cached for the URL, an empty probe request is sent first to obtain the 401 challenge, and
 * the real request is then replayed with authentication (and message protection, if supported).
 *
 * @param clientBuilder the OkHttp client builder to register the interceptor on.
 */
public void applyCredentialsFilter(OkHttpClient.Builder clientBuilder) {
    clientBuilder.addInterceptor(new Interceptor() {
        @Override
        public Response intercept(Chain chain) throws IOException {
            Request originalRequest = chain.request();
            HttpUrl url = chain.request().url();
            Map<String, String> challengeMap = cache.getCachedChallenge(url);
            Response response;
            Pair<Request, HttpMessageSecurity> authenticatedRequestPair;
            if (challengeMap != null) {
                authenticatedRequestPair = buildAuthenticatedRequest(originalRequest, challengeMap);
            } else {
                // No cached challenge: probe with an empty request to get the 401 challenge.
                response = chain.proceed(buildEmptyRequest(originalRequest));
                if (response.code() != 401) {
                    return response;
                }
                try {
                    authenticatedRequestPair = buildAuthenticatedRequest(originalRequest, response);
                } finally {
                    // BUGFIX: close in a finally block — buildAuthenticatedRequest may throw,
                    // and an unclosed OkHttp Response leaks the underlying connection.
                    response.close();
                }
            }
            response = chain.proceed(authenticatedRequestPair.getLeft());
            if (response.code() == 200) {
                return authenticatedRequestPair.getRight().unprotectResponse(response);
            } else {
                return response;
            }
        }
    });
}
response.close();
/**
 * Registers an authentication interceptor on the OkHttp builder. Requests to URLs with a cached
 * challenge are authenticated directly; otherwise an empty probe request is issued to collect
 * the 401 challenge before replaying the real request with credentials attached.
 *
 * @param clientBuilder the OkHttp client builder to register the interceptor on.
 */
public void applyCredentialsFilter(OkHttpClient.Builder clientBuilder) {
    clientBuilder.addInterceptor(new Interceptor() {
        @Override
        public Response intercept(Chain chain) throws IOException {
            Request request = chain.request();
            HttpUrl url = chain.request().url();

            Pair<Request, HttpMessageSecurity> securedRequest;
            Map<String, String> cachedChallenge = cache.getCachedChallenge(url);
            if (cachedChallenge != null) {
                // Challenge already known for this URL: authenticate straight away.
                securedRequest = buildAuthenticatedRequest(request, cachedChallenge);
            } else {
                // Probe with an empty body to obtain the authentication challenge.
                Response probe = chain.proceed(buildEmptyRequest(request));
                if (probe.code() != 401) {
                    return probe;
                }
                try {
                    securedRequest = buildAuthenticatedRequest(request, probe);
                } finally {
                    // Always release the probe response, even if challenge handling throws.
                    probe.close();
                }
            }

            Response response = chain.proceed(securedRequest.getLeft());
            return response.code() == 200
                ? securedRequest.getRight().unprotectResponse(response)
                : response;
        }
    });
}
class KeyVaultCredentials implements ServiceClientCredentials { private static final String WWW_AUTHENTICATE = "WWW-Authenticate"; private static final String BEARER_TOKEP_REFIX = "Bearer "; private static final String CLIENT_ENCRYPTION_KEY_TYPE = "RSA"; private static final int CLIENT_ENCRYPTION_KEY_SIZE = 2048; private List<String> supportedMethods = Arrays.asList("sign", "verify", "encrypt", "decrypt", "wrapkey", "unwrapkey"); private JsonWebKey clientEncryptionKey = null; private final ChallengeCache cache = new ChallengeCache(); @Override /** * Builds request with authenticated header. Protects request body if supported. * * @param originalRequest * unprotected request without auth token. * @param challengeMap * the challenge map. * @return Pair of protected request and HttpMessageSecurity used for * encryption. */ private Pair<Request, HttpMessageSecurity> buildAuthenticatedRequest(Request originalRequest, Map<String, String> challengeMap) throws IOException { Boolean supportsPop = supportsMessageProtection(originalRequest.url().toString(), challengeMap); if (supportsPop && this.clientEncryptionKey == null) { try { final KeyPairGenerator generator = KeyPairGenerator.getInstance(CLIENT_ENCRYPTION_KEY_TYPE); generator.initialize(CLIENT_ENCRYPTION_KEY_SIZE); this.clientEncryptionKey = JsonWebKey.fromRSA(generator.generateKeyPair()).withKid(UUID.randomUUID().toString()); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } } AuthenticationResult authResult = getAuthenticationCredentials(supportsPop, challengeMap); if (authResult == null) { return null; } HttpMessageSecurity httpMessageSecurity = new HttpMessageSecurity(authResult.getAuthToken(), supportsPop ? authResult.getPopKey() : "", supportsPop ? challengeMap.get("x-ms-message-encryption-key") : "", supportsPop ? 
challengeMap.get("x-ms-message-signing-key") : "", this.clientEncryptionKey); Request request = httpMessageSecurity.protectRequest(originalRequest); return Pair.of(request, httpMessageSecurity); } /** * Builds request with authenticated header. Protects request body if supported. * * @param originalRequest * unprotected request without auth token. * @param response * response with unauthorized return code. * @return Pair of protected request and HttpMessageSecurity used for * encryption. */ private Pair<Request, HttpMessageSecurity> buildAuthenticatedRequest(Request originalRequest, Response response) throws IOException { String authenticateHeader = response.header(WWW_AUTHENTICATE); Map<String, String> challengeMap = extractChallenge(authenticateHeader, BEARER_TOKEP_REFIX); challengeMap.put("x-ms-message-encryption-key", response.header("x-ms-message-encryption-key")); challengeMap.put("x-ms-message-signing-key", response.header("x-ms-message-signing-key")); cache.addCachedChallenge(originalRequest.url(), challengeMap); return buildAuthenticatedRequest(originalRequest, challengeMap); } /** * Removes request body used for EKV authorization. * * @param request * unprotected request without auth token. * @return request with removed body. */ private Request buildEmptyRequest(Request request) { RequestBody body = RequestBody.create(MediaType.parse("application/json"), "{}"); if (request.method().equalsIgnoreCase("get")) { return request; } else { return request.newBuilder().method(request.method(), body).build(); } } /** * Checks if resource supports message protection. * * @param url * resource url. * @param challengeMap * the challenge map. * @return true if message protection is supported. 
*/ private Boolean supportsMessageProtection(String url, Map<String, String> challengeMap) { if (!"true".equals(challengeMap.get("supportspop"))) { return false; } if (!url.toLowerCase().contains("/keys/")) { return false; } String[] tokens = url.split("\\?")[0].split("/"); return supportedMethods.contains(tokens[tokens.length - 1]); } /** * Extracts the authentication challenges from the challenge map and calls the * authentication callback to get the bearer token and return it. * * @param supportsPop * is resource supports pop authentication. * @param challengeMap * the challenge map. * @return AuthenticationResult with bearer token and PoP key. */ private AuthenticationResult getAuthenticationCredentials(Boolean supportsPop, Map<String, String> challengeMap) { String authorization = challengeMap.get("authorization"); if (authorization == null) { authorization = challengeMap.get("authorization_uri"); } String resource = challengeMap.get("resource"); String scope = challengeMap.get("scope"); String schema = supportsPop ? "pop" : "bearer"; return doAuthenticate(authorization, resource, scope, schema); } /** * Extracts the challenge off the authentication header. * * @param authenticateHeader * the authentication header containing all the challenges. * @param authChallengePrefix * the authentication challenge name. * @return a challenge map. 
*/ private static Map<String, String> extractChallenge(String authenticateHeader, String authChallengePrefix) { if (!isValidChallenge(authenticateHeader, authChallengePrefix)) { return null; } authenticateHeader = authenticateHeader.toLowerCase().replace(authChallengePrefix.toLowerCase(), ""); String[] challenges = authenticateHeader.split(", "); Map<String, String> challengeMap = new HashMap<String, String>(); for (String pair : challenges) { String[] keyValue = pair.split("="); challengeMap.put(keyValue[0].replaceAll("\"", ""), keyValue[1].replaceAll("\"", "")); } return challengeMap; } /** * Verifies whether a challenge is bearer or not. * * @param authenticateHeader * the authentication header containing all the challenges. * @param authChallengePrefix * the authentication challenge name. * @return */ private static boolean isValidChallenge(String authenticateHeader, String authChallengePrefix) { if (authenticateHeader != null && !authenticateHeader.isEmpty() && authenticateHeader.toLowerCase().startsWith(authChallengePrefix.toLowerCase())) { return true; } return false; } /** * Abstract method to be implemented. * * @param authorization * Identifier of the authority, a URL. * @param resource * Identifier of the target resource that is the recipient of the * requested token, a URL. * * @param scope * The scope of the authentication request. * * @return AuthenticationResult with authorization token and PoP key. * * Answers a server challenge with a token header. 
* <p> * Implementations typically use ADAL to get a token, as performed in * the sample below: * </p> * * <pre> * & * public String doAuthenticate(String authorization, String resource, String scope) { * String clientId = ...; * String clientKey = ...; * AuthenticationResult token = getAccessTokenFromClientCredentials(authorization, resource, clientId, clientKey); * return token.getAccessToken();; * } * * private static AuthenticationResult getAccessTokenFromClientCredentials(String authorization, String resource, String clientId, String clientKey) { * AuthenticationContext context = null; * AuthenticationResult result = null; * ExecutorService service = null; * try { * service = Executors.newFixedThreadPool(1); * context = new AuthenticationContext(authorization, false, service); * ClientCredential credentials = new ClientCredential(clientId, clientKey); * Future&lt;AuthenticationResult&gt; future = context.acquireToken(resource, credentials, null); * result = future.get(); * } catch (Exception e) { * throw new RuntimeException(e); * } finally { * service.shutdown(); * } * * if (result == null) { * throw new RuntimeException(&quot;authentication result was null&quot;); * } * return result; * } * </pre> * * <p> * <b>Note: The client key must be securely stored. It's advised to use * two client applications - one for development and other for * production - managed by separate parties.</b> * </p> * */ public String doAuthenticate(String authorization, String resource, String scope) { return ""; } /** * Method to be implemented. * * @param authorization * Identifier of the authority, a URL. * @param resource * Identifier of the target resource that is the recipient of the * requested token, a URL. * @param scope * The scope of the authentication request. * * @param schema * Authentication schema. Can be 'pop' or 'bearer'. * * @return AuthenticationResult with authorization token and PoP key. * * Answers a server challenge with a token header. 
* <p> * Implementations sends POST request to receive authentication token * like in example below. ADAL currently doesn't support POP * authentication. * </p> * * <pre> * public AuthenticationResult doAuthenticate(String authorization, String resource, String scope, String schema) { * JsonWebKey clientJwk = GenerateJsonWebKey(); * JsonWebKey clientPublicJwk = GetJwkWithPublicKeyOnly(clientJwk); * String token = GetAccessToken(authorization, resource, "pop".equals(schema), clientPublicJwk); * * return new AuthenticationResult(token, clientJwk.toString()); * } * * private JsonWebKey GenerateJsonWebKey() { * final KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA"); * generator.initialize(2048); * KeyPair clientRsaKeyPair = generator.generateKeyPair(); * JsonWebKey result = JsonWebKey.fromRSA(clientRsaKeyPair); * result.withKid(UUID.randomUUID().toString()); * return result; * } * * public static JsonWebKey GetJwkWithPublicKeyOnly(JsonWebKey jwk) { * KeyPair publicOnly = jwk.toRSA(false); * JsonWebKey jsonkeyPublic = JsonWebKey.fromRSA(publicOnly); * jsonkeyPublic.withKid(jwk.kid()); * jsonkeyPublic.withKeyOps(Arrays.asList(JsonWebKeyOperation.ENCRYPT, JsonWebKeyOperation.WRAP_KEY, * JsonWebKeyOperation.VERIFY)); * return jsonkeyPublic; * } * * private String GetAccessToken(String authorization, String resource, boolean supportspop, JsonWebKey jwkPublic) { * CloseableHttpClient httpclient = HttpClients.createDefault(); * HttpPost httppost = new HttpPost(authorization + "/oauth2/token"); * * * List&lt;NameValuePair&gt; params = new ArrayList&lt;NameValuePair&gt;(2); * params.add(new BasicNameValuePair("resource", resource)); * params.add(new BasicNameValuePair("response_type", "token")); * params.add(new BasicNameValuePair("grant_type", "client_credentials")); * params.add(new BasicNameValuePair("client_id", this.getApplicationId())); * params.add(new BasicNameValuePair("client_secret", this.getApplicationSecret())); * * if (supportspop) { * 
params.add(new BasicNameValuePair("pop_jwk", jwkPublic.toString())); * } * * httppost.setEntity(new UrlEncodedFormEntity(params, "UTF-8")); * * HttpResponse response = httpclient.execute(httppost); * HttpEntity entity = response.getEntity(); * * * String content = EntityUtils.toString(entity); * * ObjectMapper mapper = new ObjectMapper(); * authreply reply = mapper.readValue(content, authreply.class); * * return reply.access_token; * } * </pre> * * <p> * <b>Note: The client key must be securely stored. It's advised to use * two client applications - one for development and other for * production - managed by separate parties.</b> * </p> */ public AuthenticationResult doAuthenticate(String authorization, String resource, String scope, String schema) { return new AuthenticationResult(doAuthenticate(authorization, resource, scope), ""); } }
class KeyVaultCredentials implements ServiceClientCredentials { private static final String WWW_AUTHENTICATE = "WWW-Authenticate"; private static final String BEARER_TOKEP_REFIX = "Bearer "; private static final String CLIENT_ENCRYPTION_KEY_TYPE = "RSA"; private static final int CLIENT_ENCRYPTION_KEY_SIZE = 2048; private List<String> supportedMethods = Arrays.asList("sign", "verify", "encrypt", "decrypt", "wrapkey", "unwrapkey"); private JsonWebKey clientEncryptionKey = null; private final ChallengeCache cache = new ChallengeCache(); @Override /** * Builds request with authenticated header. Protects request body if supported. * * @param originalRequest * unprotected request without auth token. * @param challengeMap * the challenge map. * @return Pair of protected request and HttpMessageSecurity used for * encryption. */ private Pair<Request, HttpMessageSecurity> buildAuthenticatedRequest(Request originalRequest, Map<String, String> challengeMap) throws IOException { Boolean supportsPop = supportsMessageProtection(originalRequest.url().toString(), challengeMap); if (supportsPop && this.clientEncryptionKey == null) { try { final KeyPairGenerator generator = KeyPairGenerator.getInstance(CLIENT_ENCRYPTION_KEY_TYPE); generator.initialize(CLIENT_ENCRYPTION_KEY_SIZE); this.clientEncryptionKey = JsonWebKey.fromRSA(generator.generateKeyPair()).withKid(UUID.randomUUID().toString()); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } } AuthenticationResult authResult = getAuthenticationCredentials(supportsPop, challengeMap); if (authResult == null) { return null; } HttpMessageSecurity httpMessageSecurity = new HttpMessageSecurity(authResult.getAuthToken(), supportsPop ? authResult.getPopKey() : "", supportsPop ? challengeMap.get("x-ms-message-encryption-key") : "", supportsPop ? 
challengeMap.get("x-ms-message-signing-key") : "", this.clientEncryptionKey); Request request = httpMessageSecurity.protectRequest(originalRequest); return Pair.of(request, httpMessageSecurity); } /** * Builds request with authenticated header. Protects request body if supported. * * @param originalRequest * unprotected request without auth token. * @param response * response with unauthorized return code. * @return Pair of protected request and HttpMessageSecurity used for * encryption. */ private Pair<Request, HttpMessageSecurity> buildAuthenticatedRequest(Request originalRequest, Response response) throws IOException { String authenticateHeader = response.header(WWW_AUTHENTICATE); Map<String, String> challengeMap = extractChallenge(authenticateHeader, BEARER_TOKEP_REFIX); challengeMap.put("x-ms-message-encryption-key", response.header("x-ms-message-encryption-key")); challengeMap.put("x-ms-message-signing-key", response.header("x-ms-message-signing-key")); cache.addCachedChallenge(originalRequest.url(), challengeMap); return buildAuthenticatedRequest(originalRequest, challengeMap); } /** * Removes request body used for EKV authorization. * * @param request * unprotected request without auth token. * @return request with removed body. */ private Request buildEmptyRequest(Request request) { RequestBody body = RequestBody.create(MediaType.parse("application/json"), "{}"); if (request.method().equalsIgnoreCase("get")) { return request; } else { return request.newBuilder().method(request.method(), body).build(); } } /** * Checks if resource supports message protection. * * @param url * resource url. * @param challengeMap * the challenge map. * @return true if message protection is supported. 
*/ private Boolean supportsMessageProtection(String url, Map<String, String> challengeMap) { if (!"true".equals(challengeMap.get("supportspop"))) { return false; } if (!url.toLowerCase().contains("/keys/")) { return false; } String[] tokens = url.split("\\?")[0].split("/"); return supportedMethods.contains(tokens[tokens.length - 1]); } /** * Extracts the authentication challenges from the challenge map and calls the * authentication callback to get the bearer token and return it. * * @param supportsPop * is resource supports pop authentication. * @param challengeMap * the challenge map. * @return AuthenticationResult with bearer token and PoP key. */ private AuthenticationResult getAuthenticationCredentials(Boolean supportsPop, Map<String, String> challengeMap) { String authorization = challengeMap.get("authorization"); if (authorization == null) { authorization = challengeMap.get("authorization_uri"); } String resource = challengeMap.get("resource"); String scope = challengeMap.get("scope"); String schema = supportsPop ? "pop" : "bearer"; return doAuthenticate(authorization, resource, scope, schema); } /** * Extracts the challenge off the authentication header. * * @param authenticateHeader * the authentication header containing all the challenges. * @param authChallengePrefix * the authentication challenge name. * @return a challenge map. 
*/ private static Map<String, String> extractChallenge(String authenticateHeader, String authChallengePrefix) { if (!isValidChallenge(authenticateHeader, authChallengePrefix)) { return null; } authenticateHeader = authenticateHeader.toLowerCase().replace(authChallengePrefix.toLowerCase(), ""); String[] challenges = authenticateHeader.split(", "); Map<String, String> challengeMap = new HashMap<String, String>(); for (String pair : challenges) { String[] keyValue = pair.split("="); challengeMap.put(keyValue[0].replaceAll("\"", ""), keyValue[1].replaceAll("\"", "")); } return challengeMap; } /** * Verifies whether a challenge is bearer or not. * * @param authenticateHeader * the authentication header containing all the challenges. * @param authChallengePrefix * the authentication challenge name. * @return */ private static boolean isValidChallenge(String authenticateHeader, String authChallengePrefix) { if (authenticateHeader != null && !authenticateHeader.isEmpty() && authenticateHeader.toLowerCase().startsWith(authChallengePrefix.toLowerCase())) { return true; } return false; } /** * Abstract method to be implemented. * * @param authorization * Identifier of the authority, a URL. * @param resource * Identifier of the target resource that is the recipient of the * requested token, a URL. * * @param scope * The scope of the authentication request. * * @return AuthenticationResult with authorization token and PoP key. * * Answers a server challenge with a token header. 
* <p> * Implementations typically use ADAL to get a token, as performed in * the sample below: * </p> * * <pre> * & * public String doAuthenticate(String authorization, String resource, String scope) { * String clientId = ...; * String clientKey = ...; * AuthenticationResult token = getAccessTokenFromClientCredentials(authorization, resource, clientId, clientKey); * return token.getAccessToken();; * } * * private static AuthenticationResult getAccessTokenFromClientCredentials(String authorization, String resource, String clientId, String clientKey) { * AuthenticationContext context = null; * AuthenticationResult result = null; * ExecutorService service = null; * try { * service = Executors.newFixedThreadPool(1); * context = new AuthenticationContext(authorization, false, service); * ClientCredential credentials = new ClientCredential(clientId, clientKey); * Future&lt;AuthenticationResult&gt; future = context.acquireToken(resource, credentials, null); * result = future.get(); * } catch (Exception e) { * throw new RuntimeException(e); * } finally { * service.shutdown(); * } * * if (result == null) { * throw new RuntimeException(&quot;authentication result was null&quot;); * } * return result; * } * </pre> * * <p> * <b>Note: The client key must be securely stored. It's advised to use * two client applications - one for development and other for * production - managed by separate parties.</b> * </p> * */ public String doAuthenticate(String authorization, String resource, String scope) { return ""; } /** * Method to be implemented. * * @param authorization * Identifier of the authority, a URL. * @param resource * Identifier of the target resource that is the recipient of the * requested token, a URL. * @param scope * The scope of the authentication request. * * @param schema * Authentication schema. Can be 'pop' or 'bearer'. * * @return AuthenticationResult with authorization token and PoP key. * * Answers a server challenge with a token header. 
* <p> * Implementations sends POST request to receive authentication token * like in example below. ADAL currently doesn't support POP * authentication. * </p> * * <pre> * public AuthenticationResult doAuthenticate(String authorization, String resource, String scope, String schema) { * JsonWebKey clientJwk = GenerateJsonWebKey(); * JsonWebKey clientPublicJwk = GetJwkWithPublicKeyOnly(clientJwk); * String token = GetAccessToken(authorization, resource, "pop".equals(schema), clientPublicJwk); * * return new AuthenticationResult(token, clientJwk.toString()); * } * * private JsonWebKey GenerateJsonWebKey() { * final KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA"); * generator.initialize(2048); * KeyPair clientRsaKeyPair = generator.generateKeyPair(); * JsonWebKey result = JsonWebKey.fromRSA(clientRsaKeyPair); * result.withKid(UUID.randomUUID().toString()); * return result; * } * * public static JsonWebKey GetJwkWithPublicKeyOnly(JsonWebKey jwk) { * KeyPair publicOnly = jwk.toRSA(false); * JsonWebKey jsonkeyPublic = JsonWebKey.fromRSA(publicOnly); * jsonkeyPublic.withKid(jwk.kid()); * jsonkeyPublic.withKeyOps(Arrays.asList(JsonWebKeyOperation.ENCRYPT, JsonWebKeyOperation.WRAP_KEY, * JsonWebKeyOperation.VERIFY)); * return jsonkeyPublic; * } * * private String GetAccessToken(String authorization, String resource, boolean supportspop, JsonWebKey jwkPublic) { * CloseableHttpClient httpclient = HttpClients.createDefault(); * HttpPost httppost = new HttpPost(authorization + "/oauth2/token"); * * * List&lt;NameValuePair&gt; params = new ArrayList&lt;NameValuePair&gt;(2); * params.add(new BasicNameValuePair("resource", resource)); * params.add(new BasicNameValuePair("response_type", "token")); * params.add(new BasicNameValuePair("grant_type", "client_credentials")); * params.add(new BasicNameValuePair("client_id", this.getApplicationId())); * params.add(new BasicNameValuePair("client_secret", this.getApplicationSecret())); * * if (supportspop) { * 
params.add(new BasicNameValuePair("pop_jwk", jwkPublic.toString())); * } * * httppost.setEntity(new UrlEncodedFormEntity(params, "UTF-8")); * * HttpResponse response = httpclient.execute(httppost); * HttpEntity entity = response.getEntity(); * * * String content = EntityUtils.toString(entity); * * ObjectMapper mapper = new ObjectMapper(); * authreply reply = mapper.readValue(content, authreply.class); * * return reply.access_token; * } * </pre> * * <p> * <b>Note: The client key must be securely stored. It's advised to use * two client applications - one for development and other for * production - managed by separate parties.</b> * </p> */ public AuthenticationResult doAuthenticate(String authorization, String resource, String scope, String schema) { return new AuthenticationResult(doAuthenticate(authorization, resource, scope), ""); } }
Valid point. Added a try finally block to ensure response gets cleaned up via close().
public void applyCredentialsFilter(OkHttpClient.Builder clientBuilder) { clientBuilder.addInterceptor(new Interceptor() { @Override public Response intercept(Chain chain) throws IOException { Request originalRequest = chain.request(); HttpUrl url = chain.request().url(); Map<String, String> challengeMap = cache.getCachedChallenge(url); Response response; Pair<Request, HttpMessageSecurity> authenticatedRequestPair; if (challengeMap != null) { authenticatedRequestPair = buildAuthenticatedRequest(originalRequest, challengeMap); } else { response = chain.proceed(buildEmptyRequest(originalRequest)); if (response.code() != 401) { return response; } authenticatedRequestPair = buildAuthenticatedRequest(originalRequest, response); response.close(); } response = chain.proceed(authenticatedRequestPair.getLeft()); if (response.code() == 200) { return authenticatedRequestPair.getRight().unprotectResponse(response); } else { return response; } } }); }
response.close();
public void applyCredentialsFilter(OkHttpClient.Builder clientBuilder) { clientBuilder.addInterceptor(new Interceptor() { @Override public Response intercept(Chain chain) throws IOException { Request originalRequest = chain.request(); HttpUrl url = chain.request().url(); Map<String, String> challengeMap = cache.getCachedChallenge(url); Response response; Pair<Request, HttpMessageSecurity> authenticatedRequestPair; if (challengeMap != null) { authenticatedRequestPair = buildAuthenticatedRequest(originalRequest, challengeMap); } else { response = chain.proceed(buildEmptyRequest(originalRequest)); if (response.code() != 401) { return response; } try { authenticatedRequestPair = buildAuthenticatedRequest(originalRequest, response); } finally { response.close(); } } response = chain.proceed(authenticatedRequestPair.getLeft()); if (response.code() == 200) { return authenticatedRequestPair.getRight().unprotectResponse(response); } else { return response; } } }); }
class KeyVaultCredentials implements ServiceClientCredentials { private static final String WWW_AUTHENTICATE = "WWW-Authenticate"; private static final String BEARER_TOKEP_REFIX = "Bearer "; private static final String CLIENT_ENCRYPTION_KEY_TYPE = "RSA"; private static final int CLIENT_ENCRYPTION_KEY_SIZE = 2048; private List<String> supportedMethods = Arrays.asList("sign", "verify", "encrypt", "decrypt", "wrapkey", "unwrapkey"); private JsonWebKey clientEncryptionKey = null; private final ChallengeCache cache = new ChallengeCache(); @Override /** * Builds request with authenticated header. Protects request body if supported. * * @param originalRequest * unprotected request without auth token. * @param challengeMap * the challenge map. * @return Pair of protected request and HttpMessageSecurity used for * encryption. */ private Pair<Request, HttpMessageSecurity> buildAuthenticatedRequest(Request originalRequest, Map<String, String> challengeMap) throws IOException { Boolean supportsPop = supportsMessageProtection(originalRequest.url().toString(), challengeMap); if (supportsPop && this.clientEncryptionKey == null) { try { final KeyPairGenerator generator = KeyPairGenerator.getInstance(CLIENT_ENCRYPTION_KEY_TYPE); generator.initialize(CLIENT_ENCRYPTION_KEY_SIZE); this.clientEncryptionKey = JsonWebKey.fromRSA(generator.generateKeyPair()).withKid(UUID.randomUUID().toString()); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } } AuthenticationResult authResult = getAuthenticationCredentials(supportsPop, challengeMap); if (authResult == null) { return null; } HttpMessageSecurity httpMessageSecurity = new HttpMessageSecurity(authResult.getAuthToken(), supportsPop ? authResult.getPopKey() : "", supportsPop ? challengeMap.get("x-ms-message-encryption-key") : "", supportsPop ? 
challengeMap.get("x-ms-message-signing-key") : "", this.clientEncryptionKey); Request request = httpMessageSecurity.protectRequest(originalRequest); return Pair.of(request, httpMessageSecurity); } /** * Builds request with authenticated header. Protects request body if supported. * * @param originalRequest * unprotected request without auth token. * @param response * response with unauthorized return code. * @return Pair of protected request and HttpMessageSecurity used for * encryption. */ private Pair<Request, HttpMessageSecurity> buildAuthenticatedRequest(Request originalRequest, Response response) throws IOException { String authenticateHeader = response.header(WWW_AUTHENTICATE); Map<String, String> challengeMap = extractChallenge(authenticateHeader, BEARER_TOKEP_REFIX); challengeMap.put("x-ms-message-encryption-key", response.header("x-ms-message-encryption-key")); challengeMap.put("x-ms-message-signing-key", response.header("x-ms-message-signing-key")); cache.addCachedChallenge(originalRequest.url(), challengeMap); return buildAuthenticatedRequest(originalRequest, challengeMap); } /** * Removes request body used for EKV authorization. * * @param request * unprotected request without auth token. * @return request with removed body. */ private Request buildEmptyRequest(Request request) { RequestBody body = RequestBody.create(MediaType.parse("application/json"), "{}"); if (request.method().equalsIgnoreCase("get")) { return request; } else { return request.newBuilder().method(request.method(), body).build(); } } /** * Checks if resource supports message protection. * * @param url * resource url. * @param challengeMap * the challenge map. * @return true if message protection is supported. 
*/ private Boolean supportsMessageProtection(String url, Map<String, String> challengeMap) { if (!"true".equals(challengeMap.get("supportspop"))) { return false; } if (!url.toLowerCase().contains("/keys/")) { return false; } String[] tokens = url.split("\\?")[0].split("/"); return supportedMethods.contains(tokens[tokens.length - 1]); } /** * Extracts the authentication challenges from the challenge map and calls the * authentication callback to get the bearer token and return it. * * @param supportsPop * is resource supports pop authentication. * @param challengeMap * the challenge map. * @return AuthenticationResult with bearer token and PoP key. */ private AuthenticationResult getAuthenticationCredentials(Boolean supportsPop, Map<String, String> challengeMap) { String authorization = challengeMap.get("authorization"); if (authorization == null) { authorization = challengeMap.get("authorization_uri"); } String resource = challengeMap.get("resource"); String scope = challengeMap.get("scope"); String schema = supportsPop ? "pop" : "bearer"; return doAuthenticate(authorization, resource, scope, schema); } /** * Extracts the challenge off the authentication header. * * @param authenticateHeader * the authentication header containing all the challenges. * @param authChallengePrefix * the authentication challenge name. * @return a challenge map. 
*/ private static Map<String, String> extractChallenge(String authenticateHeader, String authChallengePrefix) { if (!isValidChallenge(authenticateHeader, authChallengePrefix)) { return null; } authenticateHeader = authenticateHeader.toLowerCase().replace(authChallengePrefix.toLowerCase(), ""); String[] challenges = authenticateHeader.split(", "); Map<String, String> challengeMap = new HashMap<String, String>(); for (String pair : challenges) { String[] keyValue = pair.split("="); challengeMap.put(keyValue[0].replaceAll("\"", ""), keyValue[1].replaceAll("\"", "")); } return challengeMap; } /** * Verifies whether a challenge is bearer or not. * * @param authenticateHeader * the authentication header containing all the challenges. * @param authChallengePrefix * the authentication challenge name. * @return */ private static boolean isValidChallenge(String authenticateHeader, String authChallengePrefix) { if (authenticateHeader != null && !authenticateHeader.isEmpty() && authenticateHeader.toLowerCase().startsWith(authChallengePrefix.toLowerCase())) { return true; } return false; } /** * Abstract method to be implemented. * * @param authorization * Identifier of the authority, a URL. * @param resource * Identifier of the target resource that is the recipient of the * requested token, a URL. * * @param scope * The scope of the authentication request. * * @return AuthenticationResult with authorization token and PoP key. * * Answers a server challenge with a token header. 
* <p> * Implementations typically use ADAL to get a token, as performed in * the sample below: * </p> * * <pre> * & * public String doAuthenticate(String authorization, String resource, String scope) { * String clientId = ...; * String clientKey = ...; * AuthenticationResult token = getAccessTokenFromClientCredentials(authorization, resource, clientId, clientKey); * return token.getAccessToken();; * } * * private static AuthenticationResult getAccessTokenFromClientCredentials(String authorization, String resource, String clientId, String clientKey) { * AuthenticationContext context = null; * AuthenticationResult result = null; * ExecutorService service = null; * try { * service = Executors.newFixedThreadPool(1); * context = new AuthenticationContext(authorization, false, service); * ClientCredential credentials = new ClientCredential(clientId, clientKey); * Future&lt;AuthenticationResult&gt; future = context.acquireToken(resource, credentials, null); * result = future.get(); * } catch (Exception e) { * throw new RuntimeException(e); * } finally { * service.shutdown(); * } * * if (result == null) { * throw new RuntimeException(&quot;authentication result was null&quot;); * } * return result; * } * </pre> * * <p> * <b>Note: The client key must be securely stored. It's advised to use * two client applications - one for development and other for * production - managed by separate parties.</b> * </p> * */ public String doAuthenticate(String authorization, String resource, String scope) { return ""; } /** * Method to be implemented. * * @param authorization * Identifier of the authority, a URL. * @param resource * Identifier of the target resource that is the recipient of the * requested token, a URL. * @param scope * The scope of the authentication request. * * @param schema * Authentication schema. Can be 'pop' or 'bearer'. * * @return AuthenticationResult with authorization token and PoP key. * * Answers a server challenge with a token header. 
* <p> * Implementations sends POST request to receive authentication token * like in example below. ADAL currently doesn't support POP * authentication. * </p> * * <pre> * public AuthenticationResult doAuthenticate(String authorization, String resource, String scope, String schema) { * JsonWebKey clientJwk = GenerateJsonWebKey(); * JsonWebKey clientPublicJwk = GetJwkWithPublicKeyOnly(clientJwk); * String token = GetAccessToken(authorization, resource, "pop".equals(schema), clientPublicJwk); * * return new AuthenticationResult(token, clientJwk.toString()); * } * * private JsonWebKey GenerateJsonWebKey() { * final KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA"); * generator.initialize(2048); * KeyPair clientRsaKeyPair = generator.generateKeyPair(); * JsonWebKey result = JsonWebKey.fromRSA(clientRsaKeyPair); * result.withKid(UUID.randomUUID().toString()); * return result; * } * * public static JsonWebKey GetJwkWithPublicKeyOnly(JsonWebKey jwk) { * KeyPair publicOnly = jwk.toRSA(false); * JsonWebKey jsonkeyPublic = JsonWebKey.fromRSA(publicOnly); * jsonkeyPublic.withKid(jwk.kid()); * jsonkeyPublic.withKeyOps(Arrays.asList(JsonWebKeyOperation.ENCRYPT, JsonWebKeyOperation.WRAP_KEY, * JsonWebKeyOperation.VERIFY)); * return jsonkeyPublic; * } * * private String GetAccessToken(String authorization, String resource, boolean supportspop, JsonWebKey jwkPublic) { * CloseableHttpClient httpclient = HttpClients.createDefault(); * HttpPost httppost = new HttpPost(authorization + "/oauth2/token"); * * * List&lt;NameValuePair&gt; params = new ArrayList&lt;NameValuePair&gt;(2); * params.add(new BasicNameValuePair("resource", resource)); * params.add(new BasicNameValuePair("response_type", "token")); * params.add(new BasicNameValuePair("grant_type", "client_credentials")); * params.add(new BasicNameValuePair("client_id", this.getApplicationId())); * params.add(new BasicNameValuePair("client_secret", this.getApplicationSecret())); * * if (supportspop) { * 
params.add(new BasicNameValuePair("pop_jwk", jwkPublic.toString())); * } * * httppost.setEntity(new UrlEncodedFormEntity(params, "UTF-8")); * * HttpResponse response = httpclient.execute(httppost); * HttpEntity entity = response.getEntity(); * * * String content = EntityUtils.toString(entity); * * ObjectMapper mapper = new ObjectMapper(); * authreply reply = mapper.readValue(content, authreply.class); * * return reply.access_token; * } * </pre> * * <p> * <b>Note: The client key must be securely stored. It's advised to use * two client applications - one for development and other for * production - managed by separate parties.</b> * </p> */ public AuthenticationResult doAuthenticate(String authorization, String resource, String scope, String schema) { return new AuthenticationResult(doAuthenticate(authorization, resource, scope), ""); } }
class KeyVaultCredentials implements ServiceClientCredentials { private static final String WWW_AUTHENTICATE = "WWW-Authenticate"; private static final String BEARER_TOKEP_REFIX = "Bearer "; private static final String CLIENT_ENCRYPTION_KEY_TYPE = "RSA"; private static final int CLIENT_ENCRYPTION_KEY_SIZE = 2048; private List<String> supportedMethods = Arrays.asList("sign", "verify", "encrypt", "decrypt", "wrapkey", "unwrapkey"); private JsonWebKey clientEncryptionKey = null; private final ChallengeCache cache = new ChallengeCache(); @Override /** * Builds request with authenticated header. Protects request body if supported. * * @param originalRequest * unprotected request without auth token. * @param challengeMap * the challenge map. * @return Pair of protected request and HttpMessageSecurity used for * encryption. */ private Pair<Request, HttpMessageSecurity> buildAuthenticatedRequest(Request originalRequest, Map<String, String> challengeMap) throws IOException { Boolean supportsPop = supportsMessageProtection(originalRequest.url().toString(), challengeMap); if (supportsPop && this.clientEncryptionKey == null) { try { final KeyPairGenerator generator = KeyPairGenerator.getInstance(CLIENT_ENCRYPTION_KEY_TYPE); generator.initialize(CLIENT_ENCRYPTION_KEY_SIZE); this.clientEncryptionKey = JsonWebKey.fromRSA(generator.generateKeyPair()).withKid(UUID.randomUUID().toString()); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } } AuthenticationResult authResult = getAuthenticationCredentials(supportsPop, challengeMap); if (authResult == null) { return null; } HttpMessageSecurity httpMessageSecurity = new HttpMessageSecurity(authResult.getAuthToken(), supportsPop ? authResult.getPopKey() : "", supportsPop ? challengeMap.get("x-ms-message-encryption-key") : "", supportsPop ? 
challengeMap.get("x-ms-message-signing-key") : "", this.clientEncryptionKey); Request request = httpMessageSecurity.protectRequest(originalRequest); return Pair.of(request, httpMessageSecurity); } /** * Builds request with authenticated header. Protects request body if supported. * * @param originalRequest * unprotected request without auth token. * @param response * response with unauthorized return code. * @return Pair of protected request and HttpMessageSecurity used for * encryption. */ private Pair<Request, HttpMessageSecurity> buildAuthenticatedRequest(Request originalRequest, Response response) throws IOException { String authenticateHeader = response.header(WWW_AUTHENTICATE); Map<String, String> challengeMap = extractChallenge(authenticateHeader, BEARER_TOKEP_REFIX); challengeMap.put("x-ms-message-encryption-key", response.header("x-ms-message-encryption-key")); challengeMap.put("x-ms-message-signing-key", response.header("x-ms-message-signing-key")); cache.addCachedChallenge(originalRequest.url(), challengeMap); return buildAuthenticatedRequest(originalRequest, challengeMap); } /** * Removes request body used for EKV authorization. * * @param request * unprotected request without auth token. * @return request with removed body. */ private Request buildEmptyRequest(Request request) { RequestBody body = RequestBody.create(MediaType.parse("application/json"), "{}"); if (request.method().equalsIgnoreCase("get")) { return request; } else { return request.newBuilder().method(request.method(), body).build(); } } /** * Checks if resource supports message protection. * * @param url * resource url. * @param challengeMap * the challenge map. * @return true if message protection is supported. 
*/ private Boolean supportsMessageProtection(String url, Map<String, String> challengeMap) { if (!"true".equals(challengeMap.get("supportspop"))) { return false; } if (!url.toLowerCase().contains("/keys/")) { return false; } String[] tokens = url.split("\\?")[0].split("/"); return supportedMethods.contains(tokens[tokens.length - 1]); } /** * Extracts the authentication challenges from the challenge map and calls the * authentication callback to get the bearer token and return it. * * @param supportsPop * is resource supports pop authentication. * @param challengeMap * the challenge map. * @return AuthenticationResult with bearer token and PoP key. */ private AuthenticationResult getAuthenticationCredentials(Boolean supportsPop, Map<String, String> challengeMap) { String authorization = challengeMap.get("authorization"); if (authorization == null) { authorization = challengeMap.get("authorization_uri"); } String resource = challengeMap.get("resource"); String scope = challengeMap.get("scope"); String schema = supportsPop ? "pop" : "bearer"; return doAuthenticate(authorization, resource, scope, schema); } /** * Extracts the challenge off the authentication header. * * @param authenticateHeader * the authentication header containing all the challenges. * @param authChallengePrefix * the authentication challenge name. * @return a challenge map. 
*/ private static Map<String, String> extractChallenge(String authenticateHeader, String authChallengePrefix) { if (!isValidChallenge(authenticateHeader, authChallengePrefix)) { return null; } authenticateHeader = authenticateHeader.toLowerCase().replace(authChallengePrefix.toLowerCase(), ""); String[] challenges = authenticateHeader.split(", "); Map<String, String> challengeMap = new HashMap<String, String>(); for (String pair : challenges) { String[] keyValue = pair.split("="); challengeMap.put(keyValue[0].replaceAll("\"", ""), keyValue[1].replaceAll("\"", "")); } return challengeMap; } /** * Verifies whether a challenge is bearer or not. * * @param authenticateHeader * the authentication header containing all the challenges. * @param authChallengePrefix * the authentication challenge name. * @return */ private static boolean isValidChallenge(String authenticateHeader, String authChallengePrefix) { if (authenticateHeader != null && !authenticateHeader.isEmpty() && authenticateHeader.toLowerCase().startsWith(authChallengePrefix.toLowerCase())) { return true; } return false; } /** * Abstract method to be implemented. * * @param authorization * Identifier of the authority, a URL. * @param resource * Identifier of the target resource that is the recipient of the * requested token, a URL. * * @param scope * The scope of the authentication request. * * @return AuthenticationResult with authorization token and PoP key. * * Answers a server challenge with a token header. 
* <p> * Implementations typically use ADAL to get a token, as performed in * the sample below: * </p> * * <pre> * & * public String doAuthenticate(String authorization, String resource, String scope) { * String clientId = ...; * String clientKey = ...; * AuthenticationResult token = getAccessTokenFromClientCredentials(authorization, resource, clientId, clientKey); * return token.getAccessToken();; * } * * private static AuthenticationResult getAccessTokenFromClientCredentials(String authorization, String resource, String clientId, String clientKey) { * AuthenticationContext context = null; * AuthenticationResult result = null; * ExecutorService service = null; * try { * service = Executors.newFixedThreadPool(1); * context = new AuthenticationContext(authorization, false, service); * ClientCredential credentials = new ClientCredential(clientId, clientKey); * Future&lt;AuthenticationResult&gt; future = context.acquireToken(resource, credentials, null); * result = future.get(); * } catch (Exception e) { * throw new RuntimeException(e); * } finally { * service.shutdown(); * } * * if (result == null) { * throw new RuntimeException(&quot;authentication result was null&quot;); * } * return result; * } * </pre> * * <p> * <b>Note: The client key must be securely stored. It's advised to use * two client applications - one for development and other for * production - managed by separate parties.</b> * </p> * */ public String doAuthenticate(String authorization, String resource, String scope) { return ""; } /** * Method to be implemented. * * @param authorization * Identifier of the authority, a URL. * @param resource * Identifier of the target resource that is the recipient of the * requested token, a URL. * @param scope * The scope of the authentication request. * * @param schema * Authentication schema. Can be 'pop' or 'bearer'. * * @return AuthenticationResult with authorization token and PoP key. * * Answers a server challenge with a token header. 
* <p> * Implementations sends POST request to receive authentication token * like in example below. ADAL currently doesn't support POP * authentication. * </p> * * <pre> * public AuthenticationResult doAuthenticate(String authorization, String resource, String scope, String schema) { * JsonWebKey clientJwk = GenerateJsonWebKey(); * JsonWebKey clientPublicJwk = GetJwkWithPublicKeyOnly(clientJwk); * String token = GetAccessToken(authorization, resource, "pop".equals(schema), clientPublicJwk); * * return new AuthenticationResult(token, clientJwk.toString()); * } * * private JsonWebKey GenerateJsonWebKey() { * final KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA"); * generator.initialize(2048); * KeyPair clientRsaKeyPair = generator.generateKeyPair(); * JsonWebKey result = JsonWebKey.fromRSA(clientRsaKeyPair); * result.withKid(UUID.randomUUID().toString()); * return result; * } * * public static JsonWebKey GetJwkWithPublicKeyOnly(JsonWebKey jwk) { * KeyPair publicOnly = jwk.toRSA(false); * JsonWebKey jsonkeyPublic = JsonWebKey.fromRSA(publicOnly); * jsonkeyPublic.withKid(jwk.kid()); * jsonkeyPublic.withKeyOps(Arrays.asList(JsonWebKeyOperation.ENCRYPT, JsonWebKeyOperation.WRAP_KEY, * JsonWebKeyOperation.VERIFY)); * return jsonkeyPublic; * } * * private String GetAccessToken(String authorization, String resource, boolean supportspop, JsonWebKey jwkPublic) { * CloseableHttpClient httpclient = HttpClients.createDefault(); * HttpPost httppost = new HttpPost(authorization + "/oauth2/token"); * * * List&lt;NameValuePair&gt; params = new ArrayList&lt;NameValuePair&gt;(2); * params.add(new BasicNameValuePair("resource", resource)); * params.add(new BasicNameValuePair("response_type", "token")); * params.add(new BasicNameValuePair("grant_type", "client_credentials")); * params.add(new BasicNameValuePair("client_id", this.getApplicationId())); * params.add(new BasicNameValuePair("client_secret", this.getApplicationSecret())); * * if (supportspop) { * 
params.add(new BasicNameValuePair("pop_jwk", jwkPublic.toString())); * } * * httppost.setEntity(new UrlEncodedFormEntity(params, "UTF-8")); * * HttpResponse response = httpclient.execute(httppost); * HttpEntity entity = response.getEntity(); * * * String content = EntityUtils.toString(entity); * * ObjectMapper mapper = new ObjectMapper(); * authreply reply = mapper.readValue(content, authreply.class); * * return reply.access_token; * } * </pre> * * <p> * <b>Note: The client key must be securely stored. It's advised to use * two client applications - one for development and other for * production - managed by separate parties.</b> * </p> */ public AuthenticationResult doAuthenticate(String authorization, String resource, String scope, String schema) { return new AuthenticationResult(doAuthenticate(authorization, resource, scope), ""); } }
It may be my ignorance of language specifics, but these look like they're stubbed out and not implemented. Is that intentional?
IterableResponse<EventData> receive(int maximumMessageCount) { return new IterableResponse<>(Flux.empty()); }
return new IterableResponse<>(Flux.empty());
IterableResponse<EventData> receive(int maximumMessageCount) { return new IterableResponse<>(Flux.empty()); }
class EventHubConsumer implements Closeable { private final EventHubAsyncConsumer consumer; private final EventHubConsumerOptions options; EventHubConsumer(EventHubAsyncConsumer consumer, EventHubConsumerOptions options) { this.consumer = Objects.requireNonNull(consumer); this.options = Objects.requireNonNull(options); this.consumer.receive().windowTimeout(options.prefetchCount(), this.options.retry().tryTimeout()); } /** * Receives a batch of EventData from the Event Hub partition. * * @param maximumMessageCount The maximum number of messages to receive in this batch. */ /** * Receives a batch of EventData from the Event Hub partition * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for the * batch; if not specified, the default wait time specified when the consumer was created will be used. */ IterableResponse<EventData> receive(int maximumMessageCount, Duration maximumWaitTime) { return new IterableResponse<>(Flux.empty()); } /** * {@inheritDoc} */ @Override public void close() throws IOException { consumer.close(); } }
class EventHubConsumer implements Closeable { private final EventHubAsyncConsumer consumer; private final EventHubConsumerOptions options; EventHubConsumer(EventHubAsyncConsumer consumer, EventHubConsumerOptions options) { this.consumer = Objects.requireNonNull(consumer); this.options = Objects.requireNonNull(options); this.consumer.receive().windowTimeout(options.prefetchCount(), this.options.retry().tryTimeout()); } /** * Receives a batch of EventData from the Event Hub partition. * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @return A set of {@link EventData} that was received. The iterable contains up to {@code maximumMessageCount} * events. */ public /** * Receives a batch of EventData from the Event Hub partition * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for the * batch; if not specified, the default wait time specified when the consumer was created will be used. * @return A set of {@link EventData} that was received. The iterable contains up to {@code maximumMessageCount} * events. */ public IterableResponse<EventData> receive(int maximumMessageCount, Duration maximumWaitTime) { return new IterableResponse<>(Flux.empty()); } /** * {@inheritDoc} */ @Override public void close() throws IOException { consumer.close(); } }
Check for `null` before using `options`.
public EventHubProducer createProducer(EventHubProducerOptions options) { final EventHubAsyncProducer producer = client.createProducer(); return new EventHubProducer(producer, options.retry().tryTimeout()); }
return new EventHubProducer(producer, options.retry().tryTimeout());
public EventHubProducer createProducer(EventHubProducerOptions options) { Objects.requireNonNull(options); final EventHubAsyncProducer producer = client.createProducer(options); final Duration tryTimeout = options.retry() != null && options.retry().tryTimeout() != null ? options.retry().tryTimeout() : defaultProducerOptions.retry().tryTimeout(); return new EventHubProducer(producer, tryTimeout); }
class EventHubClient implements Closeable { private final EventHubAsyncClient client; private final RetryOptions retry; private final EventHubProducerOptions defaultProducerOptions; private final EventHubConsumerOptions defaultConsumerOptions; EventHubClient(EventHubAsyncClient client, ConnectionOptions connectionOptions) { this.retry = connectionOptions.retry(); this.defaultProducerOptions = new EventHubProducerOptions() .retry(connectionOptions.retry()); this.defaultConsumerOptions = new EventHubConsumerOptions() .retry(connectionOptions.retry()) .scheduler(connectionOptions.scheduler()); this.client = client; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public EventHubProperties getProperties() { return client.getProperties().block(retry.tryTimeout()); } /** * Retrieves the identifiers for all the partitions of an Event Hub. * * @return The identifiers for all partitions of an Event Hub. */ @ServiceMethod(returns = ReturnType.COLLECTION) public IterableResponse<String> getPartitionIds() { return new IterableResponse<>(client.getPartitionIds()); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The information for the requested partition under the Event Hub this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public PartitionProperties getPartitionProperties(String partitionId) { return client.getPartitionProperties(partitionId).block(retry.tryTimeout()); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. 
Event data is automatically routed to an available partition. * * @return A new {@link EventHubProducer}. */ public EventHubProducer createProducer() { return createProducer(defaultProducerOptions); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. If {@link EventHubProducerOptions * events are routed to that specific partition. Otherwise, events are automatically routed to an available * partition. * * @param options The set of options to apply when creating the producer. * @return A new {@link EventHubProducer}. * @throws NullPointerException if {@code options} is {@code null}. */ /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the specified consumer group, and begins reading events from the {@code eventPosition}. * * The consumer created is non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers". * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in * the context of this group. The name of the consumer group that is created by default is {@link * EventHubAsyncClient * @param partitionId The identifier of the Event Hub partition. * @param eventPosition The position within the partition where the consumer should begin reading events. * @return A new {@link EventHubConsumer} that receives events from the partition at the given position. * @throws NullPointerException If {@code eventPosition}, or {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is {@code null} or an * empty string. 
*/ public EventHubConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition) { final EventHubAsyncConsumer consumer = client.createConsumer(consumerGroup, partitionId, eventPosition); return new EventHubConsumer(consumer, defaultConsumerOptions); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the configured consumer group, and begins reading events from the specified {@code eventPosition}. * * <p> * A consumer may be exclusive, which asserts ownership over the partition for the consumer group to ensure that * only one consumer from that group is reading the from the partition. These exclusive consumers are sometimes * referred to as "Epoch Consumers." * * A consumer may also be non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers." * * Designating a consumer as exclusive may be specified in the {@code options}, by setting {@link * EventHubConsumerOptions * non-exclusive. * </p> * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in * the context of this group. The name of the consumer group that is created by default is {@link * EventHubAsyncClient * @param partitionId The identifier of the Event Hub partition from which events will be received. * @param eventPosition The position within the partition where the consumer should begin reading events. * @param options The set of options to apply when creating the consumer. * @return An new {@link EventHubConsumer} that receives events from the partition with all configured {@link * EventHubConsumerOptions}. * @throws NullPointerException If {@code eventPosition}, or {@code options} is {@code null}. 
* @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is {@code null} or an * empty string. */ public EventHubConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition, EventHubConsumerOptions options) { final EventHubAsyncConsumer consumer = client.createConsumer(consumerGroup, partitionId, eventPosition, options); return new EventHubConsumer(consumer, options); } /** * {@inheritDoc} */ @Override public void close() { client.close(); } }
class EventHubClient implements Closeable { private final EventHubAsyncClient client; private final RetryOptions retry; private final EventHubProducerOptions defaultProducerOptions; private final EventHubConsumerOptions defaultConsumerOptions; EventHubClient(EventHubAsyncClient client, ConnectionOptions connectionOptions) { Objects.requireNonNull(connectionOptions); this.client = Objects.requireNonNull(client); this.retry = connectionOptions.retry(); this.defaultProducerOptions = new EventHubProducerOptions() .retry(connectionOptions.retry()); this.defaultConsumerOptions = new EventHubConsumerOptions() .retry(connectionOptions.retry()) .scheduler(connectionOptions.scheduler()); } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public EventHubProperties getProperties() { return client.getProperties().block(retry.tryTimeout()); } /** * Retrieves the identifiers for all the partitions of an Event Hub. * * @return The identifiers for all partitions of an Event Hub. */ @ServiceMethod(returns = ReturnType.COLLECTION) public IterableResponse<String> getPartitionIds() { return new IterableResponse<>(client.getPartitionIds()); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The information for the requested partition under the Event Hub this client is associated with. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PartitionProperties getPartitionProperties(String partitionId) { return client.getPartitionProperties(partitionId).block(retry.tryTimeout()); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. Event data is automatically routed to an available partition. * * @return A new {@link EventHubProducer}. */ public EventHubProducer createProducer() { return createProducer(defaultProducerOptions); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. If {@link EventHubProducerOptions * events are routed to that specific partition. Otherwise, events are automatically routed to an available * partition. * * @param options The set of options to apply when creating the producer. * @return A new {@link EventHubProducer}. * @throws NullPointerException if {@code options} is {@code null}. */ /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the specified consumer group, and begins reading events from the {@code eventPosition}. * * The consumer created is non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers". * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the * context of this group. The name of the consumer group that is created by default is {@link * EventHubAsyncClient * @param partitionId The identifier of the Event Hub partition. * @param eventPosition The position within the partition where the consumer should begin reading events. * @return A new {@link EventHubConsumer} that receives events from the partition at the given position. 
* @throws NullPointerException If {@code eventPosition}, {@code consumerGroup}, {@code partitionId}, or {@code * options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is an empty string. */ public EventHubConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition) { final EventHubAsyncConsumer consumer = client.createConsumer(consumerGroup, partitionId, eventPosition); return new EventHubConsumer(consumer, defaultConsumerOptions); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the configured consumer group, and begins reading events from the specified {@code eventPosition}. * * <p> * A consumer may be exclusive, which asserts ownership over the partition for the consumer group to ensure that * only one consumer from that group is reading from the partition. These exclusive consumers are sometimes referred * to as "Epoch Consumers." * * A consumer may also be non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers." * * Designating a consumer as exclusive may be specified in the {@code options}, by setting {@link * EventHubConsumerOptions * non-exclusive. * </p> * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the * context of this group. The name of the consumer group that is created by default is {@link * EventHubAsyncClient * @param partitionId The identifier of the Event Hub partition from which events will be received. * @param eventPosition The position within the partition where the consumer should begin reading events. * @param options The set of options to apply when creating the consumer. 
* @return An new {@link EventHubConsumer} that receives events from the partition with all configured {@link * EventHubConsumerOptions}. * @throws NullPointerException If {@code eventPosition}, {@code consumerGroup}, {@code partitionId}, or {@code * options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is an empty string. */ public EventHubConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition, EventHubConsumerOptions options) { final EventHubAsyncConsumer consumer = client.createConsumer(consumerGroup, partitionId, eventPosition, options); return new EventHubConsumer(consumer, options); } /** * {@inheritDoc} */ @Override public void close() { client.close(); } }
This is intentional. This PR is focused only on the synchronous producer for user study starting on Thursday. The synch Consumer will be implemented later.
IterableResponse<EventData> receive(int maximumMessageCount) { return new IterableResponse<>(Flux.empty()); }
return new IterableResponse<>(Flux.empty());
IterableResponse<EventData> receive(int maximumMessageCount) { return new IterableResponse<>(Flux.empty()); }
class EventHubConsumer implements Closeable { private final EventHubAsyncConsumer consumer; private final EventHubConsumerOptions options; EventHubConsumer(EventHubAsyncConsumer consumer, EventHubConsumerOptions options) { this.consumer = Objects.requireNonNull(consumer); this.options = Objects.requireNonNull(options); this.consumer.receive().windowTimeout(options.prefetchCount(), this.options.retry().tryTimeout()); } /** * Receives a batch of EventData from the Event Hub partition. * * @param maximumMessageCount The maximum number of messages to receive in this batch. */ /** * Receives a batch of EventData from the Event Hub partition * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for the * batch; if not specified, the default wait time specified when the consumer was created will be used. */ IterableResponse<EventData> receive(int maximumMessageCount, Duration maximumWaitTime) { return new IterableResponse<>(Flux.empty()); } /** * {@inheritDoc} */ @Override public void close() throws IOException { consumer.close(); } }
class EventHubConsumer implements Closeable { private final EventHubAsyncConsumer consumer; private final EventHubConsumerOptions options; EventHubConsumer(EventHubAsyncConsumer consumer, EventHubConsumerOptions options) { this.consumer = Objects.requireNonNull(consumer); this.options = Objects.requireNonNull(options); this.consumer.receive().windowTimeout(options.prefetchCount(), this.options.retry().tryTimeout()); } /** * Receives a batch of EventData from the Event Hub partition. * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @return A set of {@link EventData} that was received. The iterable contains up to {@code maximumMessageCount} * events. */ public /** * Receives a batch of EventData from the Event Hub partition * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for the * batch; if not specified, the default wait time specified when the consumer was created will be used. * @return A set of {@link EventData} that was received. The iterable contains up to {@code maximumMessageCount} * events. */ public IterableResponse<EventData> receive(int maximumMessageCount, Duration maximumWaitTime) { return new IterableResponse<>(Flux.empty()); } /** * {@inheritDoc} */ @Override public void close() throws IOException { consumer.close(); } }
"application/octet-stream" mime-type is well known. I'd use `.equalsIgnoreCase()` because they are also case-insensitive. https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) { final String incomingUrl = applyReplacementRule(request.url().toString()); final String incomingMethod = request.httpMethod().toString(); final String matchingUrl = removeHost(incomingUrl); NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record -> record.method().equalsIgnoreCase(incomingMethod) && removeHost(record.uri()).equalsIgnoreCase(matchingUrl)); count.incrementAndGet(); if (networkCallRecord == null) { logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl); logger.warning("Records requested: {}.", count); return Mono.error(new IllegalStateException("==> Unexpected request: " + incomingMethod + " " + incomingUrl)); } if (networkCallRecord.exception() != null) { throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.exception().get())); } int recordStatusCode = Integer.parseInt(networkCallRecord.response().get("StatusCode")); HttpHeaders headers = new HttpHeaders(); for (Map.Entry<String, String> pair : networkCallRecord.response().entrySet()) { if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) { String rawHeader = pair.getValue(); for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawHeader = rawHeader.replaceAll(rule.getKey(), rule.getValue()); } } headers.put(pair.getKey(), rawHeader); } } String rawBody = networkCallRecord.response().get("Body"); byte[] bytes = null; if (rawBody != null) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawBody = rawBody.replaceAll(rule.getKey(), rule.getValue()); } } String contentType = networkCallRecord.response().get("Content-Type"); if (contentType != null && contentType.contains("octet-stream")) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); for (String piece : rawBody.substring(1, rawBody.length() - 1).split(", 
")) { outputStream.write(Byte.parseByte(piece)); } bytes = outputStream.toByteArray(); } else { bytes = rawBody.getBytes(StandardCharsets.UTF_8); } if (bytes.length > 0) { headers.put("Content-Length", String.valueOf(bytes.length)); } } HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes); return Mono.just(response); }
if (contentType != null && contentType.contains("octet-stream")) {
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) { final String incomingUrl = applyReplacementRule(request.url().toString()); final String incomingMethod = request.httpMethod().toString(); final String matchingUrl = removeHost(incomingUrl); NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record -> record.method().equalsIgnoreCase(incomingMethod) && removeHost(record.uri()).equalsIgnoreCase(matchingUrl)); count.incrementAndGet(); if (networkCallRecord == null) { logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl); logger.warning("Records requested: {}.", count); return Mono.error(new IllegalStateException("==> Unexpected request: " + incomingMethod + " " + incomingUrl)); } if (networkCallRecord.exception() != null) { throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.exception().get())); } int recordStatusCode = Integer.parseInt(networkCallRecord.response().get("StatusCode")); HttpHeaders headers = new HttpHeaders(); for (Map.Entry<String, String> pair : networkCallRecord.response().entrySet()) { if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) { String rawHeader = pair.getValue(); for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawHeader = rawHeader.replaceAll(rule.getKey(), rule.getValue()); } } headers.put(pair.getKey(), rawHeader); } } String rawBody = networkCallRecord.response().get("Body"); byte[] bytes = null; if (rawBody != null) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawBody = rawBody.replaceAll(rule.getKey(), rule.getValue()); } } String contentType = networkCallRecord.response().get("Content-Type"); if (contentType != null && contentType.equalsIgnoreCase("application/octet-stream")) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); for (String piece : rawBody.substring(1, 
rawBody.length() - 1).split(", ")) { outputStream.write(Byte.parseByte(piece)); } bytes = outputStream.toByteArray(); } else { bytes = rawBody.getBytes(StandardCharsets.UTF_8); } if (bytes.length > 0) { headers.put("Content-Length", String.valueOf(bytes.length)); } } HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes); return Mono.just(response); }
class PlaybackClient implements HttpClient { private final ClientLogger logger = new ClientLogger(PlaybackClient.class); private final AtomicInteger count = new AtomicInteger(0); private final Map<String, String> textReplacementRules; private final RecordedData recordedData; /** * Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link * NetworkCallRecord * * @param recordedData The data to playback. * @param textReplacementRules A set of rules to replace text in network call responses. */ public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) { Objects.requireNonNull(recordedData); this.recordedData = recordedData; this.textReplacementRules = textReplacementRules == null ? new HashMap<>() : textReplacementRules; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(final HttpRequest request) { return Mono.defer(() -> playbackHttpResponse(request)); } /** * {@inheritDoc} */ @Override public HttpClient proxy(Supplier<ProxyOptions> supplier) { return this; } /** * {@inheritDoc} */ @Override public HttpClient wiretap(boolean b) { return this; } /** * {@inheritDoc} */ @Override public HttpClient port(int i) { return this; } private String applyReplacementRule(String text) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { text = text.replaceAll(rule.getKey(), rule.getValue()); } } return text; } private static String removeHost(String url) { UrlBuilder urlBuilder = UrlBuilder.parse(url); if (urlBuilder.query().containsKey("sig")) { urlBuilder.setQueryParameter("sig", "REDACTED"); } return String.format("%s%s", urlBuilder.path(), urlBuilder.queryString()); } }
class PlaybackClient implements HttpClient { private final ClientLogger logger = new ClientLogger(PlaybackClient.class); private final AtomicInteger count = new AtomicInteger(0); private final Map<String, String> textReplacementRules; private final RecordedData recordedData; /** * Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link * NetworkCallRecord * * @param recordedData The data to playback. * @param textReplacementRules A set of rules to replace text in network call responses. */ public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) { Objects.requireNonNull(recordedData); this.recordedData = recordedData; this.textReplacementRules = textReplacementRules == null ? new HashMap<>() : textReplacementRules; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(final HttpRequest request) { return Mono.defer(() -> playbackHttpResponse(request)); } /** * {@inheritDoc} */ @Override public HttpClient proxy(Supplier<ProxyOptions> supplier) { return this; } /** * {@inheritDoc} */ @Override public HttpClient wiretap(boolean b) { return this; } /** * {@inheritDoc} */ @Override public HttpClient port(int i) { return this; } private String applyReplacementRule(String text) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { text = text.replaceAll(rule.getKey(), rule.getValue()); } } return text; } private static String removeHost(String url) { UrlBuilder urlBuilder = UrlBuilder.parse(url); if (urlBuilder.query().containsKey("sig")) { urlBuilder.setQueryParameter("sig", "REDACTED"); } return String.format("%s%s", urlBuilder.path(), urlBuilder.queryString()); } }
in the RFC, headers are case insensitive. It is by convention that you see "Content-Type" rather than "content-type". But they amount to the same thing.
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) { final String incomingUrl = applyReplacementRule(request.url().toString()); final String incomingMethod = request.httpMethod().toString(); final String matchingUrl = removeHost(incomingUrl); NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record -> record.method().equalsIgnoreCase(incomingMethod) && removeHost(record.uri()).equalsIgnoreCase(matchingUrl)); count.incrementAndGet(); if (networkCallRecord == null) { logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl); logger.warning("Records requested: {}.", count); return Mono.error(new IllegalStateException("==> Unexpected request: " + incomingMethod + " " + incomingUrl)); } if (networkCallRecord.exception() != null) { throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.exception().get())); } int recordStatusCode = Integer.parseInt(networkCallRecord.response().get("StatusCode")); HttpHeaders headers = new HttpHeaders(); for (Map.Entry<String, String> pair : networkCallRecord.response().entrySet()) { if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) { String rawHeader = pair.getValue(); for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawHeader = rawHeader.replaceAll(rule.getKey(), rule.getValue()); } } headers.put(pair.getKey(), rawHeader); } } String rawBody = networkCallRecord.response().get("Body"); byte[] bytes = null; if (rawBody != null) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawBody = rawBody.replaceAll(rule.getKey(), rule.getValue()); } } String contentType = networkCallRecord.response().get("Content-Type"); if (contentType != null && contentType.contains("octet-stream")) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); for (String piece : rawBody.substring(1, rawBody.length() - 1).split(", 
")) { outputStream.write(Byte.parseByte(piece)); } bytes = outputStream.toByteArray(); } else { bytes = rawBody.getBytes(StandardCharsets.UTF_8); } if (bytes.length > 0) { headers.put("Content-Length", String.valueOf(bytes.length)); } } HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes); return Mono.just(response); }
if (contentType != null && contentType.contains("octet-stream")) {
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) { final String incomingUrl = applyReplacementRule(request.url().toString()); final String incomingMethod = request.httpMethod().toString(); final String matchingUrl = removeHost(incomingUrl); NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record -> record.method().equalsIgnoreCase(incomingMethod) && removeHost(record.uri()).equalsIgnoreCase(matchingUrl)); count.incrementAndGet(); if (networkCallRecord == null) { logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl); logger.warning("Records requested: {}.", count); return Mono.error(new IllegalStateException("==> Unexpected request: " + incomingMethod + " " + incomingUrl)); } if (networkCallRecord.exception() != null) { throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.exception().get())); } int recordStatusCode = Integer.parseInt(networkCallRecord.response().get("StatusCode")); HttpHeaders headers = new HttpHeaders(); for (Map.Entry<String, String> pair : networkCallRecord.response().entrySet()) { if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) { String rawHeader = pair.getValue(); for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawHeader = rawHeader.replaceAll(rule.getKey(), rule.getValue()); } } headers.put(pair.getKey(), rawHeader); } } String rawBody = networkCallRecord.response().get("Body"); byte[] bytes = null; if (rawBody != null) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawBody = rawBody.replaceAll(rule.getKey(), rule.getValue()); } } String contentType = networkCallRecord.response().get("Content-Type"); if (contentType != null && contentType.equalsIgnoreCase("application/octet-stream")) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); for (String piece : rawBody.substring(1, 
rawBody.length() - 1).split(", ")) { outputStream.write(Byte.parseByte(piece)); } bytes = outputStream.toByteArray(); } else { bytes = rawBody.getBytes(StandardCharsets.UTF_8); } if (bytes.length > 0) { headers.put("Content-Length", String.valueOf(bytes.length)); } } HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes); return Mono.just(response); }
class PlaybackClient implements HttpClient { private final ClientLogger logger = new ClientLogger(PlaybackClient.class); private final AtomicInteger count = new AtomicInteger(0); private final Map<String, String> textReplacementRules; private final RecordedData recordedData; /** * Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link * NetworkCallRecord * * @param recordedData The data to playback. * @param textReplacementRules A set of rules to replace text in network call responses. */ public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) { Objects.requireNonNull(recordedData); this.recordedData = recordedData; this.textReplacementRules = textReplacementRules == null ? new HashMap<>() : textReplacementRules; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(final HttpRequest request) { return Mono.defer(() -> playbackHttpResponse(request)); } /** * {@inheritDoc} */ @Override public HttpClient proxy(Supplier<ProxyOptions> supplier) { return this; } /** * {@inheritDoc} */ @Override public HttpClient wiretap(boolean b) { return this; } /** * {@inheritDoc} */ @Override public HttpClient port(int i) { return this; } private String applyReplacementRule(String text) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { text = text.replaceAll(rule.getKey(), rule.getValue()); } } return text; } private static String removeHost(String url) { UrlBuilder urlBuilder = UrlBuilder.parse(url); if (urlBuilder.query().containsKey("sig")) { urlBuilder.setQueryParameter("sig", "REDACTED"); } return String.format("%s%s", urlBuilder.path(), urlBuilder.queryString()); } }
class PlaybackClient implements HttpClient { private final ClientLogger logger = new ClientLogger(PlaybackClient.class); private final AtomicInteger count = new AtomicInteger(0); private final Map<String, String> textReplacementRules; private final RecordedData recordedData; /** * Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link * NetworkCallRecord * * @param recordedData The data to playback. * @param textReplacementRules A set of rules to replace text in network call responses. */ public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) { Objects.requireNonNull(recordedData); this.recordedData = recordedData; this.textReplacementRules = textReplacementRules == null ? new HashMap<>() : textReplacementRules; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(final HttpRequest request) { return Mono.defer(() -> playbackHttpResponse(request)); } /** * {@inheritDoc} */ @Override public HttpClient proxy(Supplier<ProxyOptions> supplier) { return this; } /** * {@inheritDoc} */ @Override public HttpClient wiretap(boolean b) { return this; } /** * {@inheritDoc} */ @Override public HttpClient port(int i) { return this; } private String applyReplacementRule(String text) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { text = text.replaceAll(rule.getKey(), rule.getValue()); } } return text; } private static String removeHost(String url) { UrlBuilder urlBuilder = UrlBuilder.parse(url); if (urlBuilder.query().containsKey("sig")) { urlBuilder.setQueryParameter("sig", "REDACTED"); } return String.format("%s%s", urlBuilder.path(), urlBuilder.queryString()); } }
Playback client is not validator. Is it better to ignore the case for all the hard-code string checking?
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) { final String incomingUrl = applyReplacementRule(request.url().toString()); final String incomingMethod = request.httpMethod().toString(); final String matchingUrl = removeHost(incomingUrl); NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record -> record.method().equalsIgnoreCase(incomingMethod) && removeHost(record.uri()).equalsIgnoreCase(matchingUrl)); count.incrementAndGet(); if (networkCallRecord == null) { logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl); logger.warning("Records requested: {}.", count); return Mono.error(new IllegalStateException("==> Unexpected request: " + incomingMethod + " " + incomingUrl)); } if (networkCallRecord.exception() != null) { throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.exception().get())); } int recordStatusCode = Integer.parseInt(networkCallRecord.response().get("StatusCode")); HttpHeaders headers = new HttpHeaders(); for (Map.Entry<String, String> pair : networkCallRecord.response().entrySet()) { if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) { String rawHeader = pair.getValue(); for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawHeader = rawHeader.replaceAll(rule.getKey(), rule.getValue()); } } headers.put(pair.getKey(), rawHeader); } } String rawBody = networkCallRecord.response().get("Body"); byte[] bytes = null; if (rawBody != null) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawBody = rawBody.replaceAll(rule.getKey(), rule.getValue()); } } String contentType = networkCallRecord.response().get("Content-Type"); if (contentType != null && contentType.contains("octet-stream")) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); for (String piece : rawBody.substring(1, rawBody.length() - 1).split(", 
")) { outputStream.write(Byte.parseByte(piece)); } bytes = outputStream.toByteArray(); } else { bytes = rawBody.getBytes(StandardCharsets.UTF_8); } if (bytes.length > 0) { headers.put("Content-Length", String.valueOf(bytes.length)); } } HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes); return Mono.just(response); }
if (contentType != null && contentType.contains("octet-stream")) {
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) { final String incomingUrl = applyReplacementRule(request.url().toString()); final String incomingMethod = request.httpMethod().toString(); final String matchingUrl = removeHost(incomingUrl); NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record -> record.method().equalsIgnoreCase(incomingMethod) && removeHost(record.uri()).equalsIgnoreCase(matchingUrl)); count.incrementAndGet(); if (networkCallRecord == null) { logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl); logger.warning("Records requested: {}.", count); return Mono.error(new IllegalStateException("==> Unexpected request: " + incomingMethod + " " + incomingUrl)); } if (networkCallRecord.exception() != null) { throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.exception().get())); } int recordStatusCode = Integer.parseInt(networkCallRecord.response().get("StatusCode")); HttpHeaders headers = new HttpHeaders(); for (Map.Entry<String, String> pair : networkCallRecord.response().entrySet()) { if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) { String rawHeader = pair.getValue(); for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawHeader = rawHeader.replaceAll(rule.getKey(), rule.getValue()); } } headers.put(pair.getKey(), rawHeader); } } String rawBody = networkCallRecord.response().get("Body"); byte[] bytes = null; if (rawBody != null) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawBody = rawBody.replaceAll(rule.getKey(), rule.getValue()); } } String contentType = networkCallRecord.response().get("Content-Type"); if (contentType != null && contentType.equalsIgnoreCase("application/octet-stream")) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); for (String piece : rawBody.substring(1, 
rawBody.length() - 1).split(", ")) { outputStream.write(Byte.parseByte(piece)); } bytes = outputStream.toByteArray(); } else { bytes = rawBody.getBytes(StandardCharsets.UTF_8); } if (bytes.length > 0) { headers.put("Content-Length", String.valueOf(bytes.length)); } } HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes); return Mono.just(response); }
class PlaybackClient implements HttpClient { private final ClientLogger logger = new ClientLogger(PlaybackClient.class); private final AtomicInteger count = new AtomicInteger(0); private final Map<String, String> textReplacementRules; private final RecordedData recordedData; /** * Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link * NetworkCallRecord * * @param recordedData The data to playback. * @param textReplacementRules A set of rules to replace text in network call responses. */ public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) { Objects.requireNonNull(recordedData); this.recordedData = recordedData; this.textReplacementRules = textReplacementRules == null ? new HashMap<>() : textReplacementRules; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(final HttpRequest request) { return Mono.defer(() -> playbackHttpResponse(request)); } /** * {@inheritDoc} */ @Override public HttpClient proxy(Supplier<ProxyOptions> supplier) { return this; } /** * {@inheritDoc} */ @Override public HttpClient wiretap(boolean b) { return this; } /** * {@inheritDoc} */ @Override public HttpClient port(int i) { return this; } private String applyReplacementRule(String text) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { text = text.replaceAll(rule.getKey(), rule.getValue()); } } return text; } private static String removeHost(String url) { UrlBuilder urlBuilder = UrlBuilder.parse(url); if (urlBuilder.query().containsKey("sig")) { urlBuilder.setQueryParameter("sig", "REDACTED"); } return String.format("%s%s", urlBuilder.path(), urlBuilder.queryString()); } }
class PlaybackClient implements HttpClient { private final ClientLogger logger = new ClientLogger(PlaybackClient.class); private final AtomicInteger count = new AtomicInteger(0); private final Map<String, String> textReplacementRules; private final RecordedData recordedData; /** * Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link * NetworkCallRecord * * @param recordedData The data to playback. * @param textReplacementRules A set of rules to replace text in network call responses. */ public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) { Objects.requireNonNull(recordedData); this.recordedData = recordedData; this.textReplacementRules = textReplacementRules == null ? new HashMap<>() : textReplacementRules; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(final HttpRequest request) { return Mono.defer(() -> playbackHttpResponse(request)); } /** * {@inheritDoc} */ @Override public HttpClient proxy(Supplier<ProxyOptions> supplier) { return this; } /** * {@inheritDoc} */ @Override public HttpClient wiretap(boolean b) { return this; } /** * {@inheritDoc} */ @Override public HttpClient port(int i) { return this; } private String applyReplacementRule(String text) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { text = text.replaceAll(rule.getKey(), rule.getValue()); } } return text; } private static String removeHost(String url) { UrlBuilder urlBuilder = UrlBuilder.parse(url); if (urlBuilder.query().containsKey("sig")) { urlBuilder.setQueryParameter("sig", "REDACTED"); } return String.format("%s%s", urlBuilder.path(), urlBuilder.queryString()); } }
RecordNetworkCallPolicy encounters the same issues. I added a comment about that.
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) { final String incomingUrl = applyReplacementRule(request.url().toString()); final String incomingMethod = request.httpMethod().toString(); final String matchingUrl = removeHost(incomingUrl); NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record -> record.method().equalsIgnoreCase(incomingMethod) && removeHost(record.uri()).equalsIgnoreCase(matchingUrl)); count.incrementAndGet(); if (networkCallRecord == null) { logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl); logger.warning("Records requested: {}.", count); return Mono.error(new IllegalStateException("==> Unexpected request: " + incomingMethod + " " + incomingUrl)); } if (networkCallRecord.exception() != null) { throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.exception().get())); } int recordStatusCode = Integer.parseInt(networkCallRecord.response().get("StatusCode")); HttpHeaders headers = new HttpHeaders(); for (Map.Entry<String, String> pair : networkCallRecord.response().entrySet()) { if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) { String rawHeader = pair.getValue(); for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawHeader = rawHeader.replaceAll(rule.getKey(), rule.getValue()); } } headers.put(pair.getKey(), rawHeader); } } String rawBody = networkCallRecord.response().get("Body"); byte[] bytes = null; if (rawBody != null) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawBody = rawBody.replaceAll(rule.getKey(), rule.getValue()); } } String contentType = networkCallRecord.response().get("Content-Type"); if (contentType != null && contentType.contains("octet-stream")) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); for (String piece : rawBody.substring(1, rawBody.length() - 1).split(", 
")) { outputStream.write(Byte.parseByte(piece)); } bytes = outputStream.toByteArray(); } else { bytes = rawBody.getBytes(StandardCharsets.UTF_8); } if (bytes.length > 0) { headers.put("Content-Length", String.valueOf(bytes.length)); } } HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes); return Mono.just(response); }
if (contentType != null && contentType.contains("octet-stream")) {
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) { final String incomingUrl = applyReplacementRule(request.url().toString()); final String incomingMethod = request.httpMethod().toString(); final String matchingUrl = removeHost(incomingUrl); NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record -> record.method().equalsIgnoreCase(incomingMethod) && removeHost(record.uri()).equalsIgnoreCase(matchingUrl)); count.incrementAndGet(); if (networkCallRecord == null) { logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl); logger.warning("Records requested: {}.", count); return Mono.error(new IllegalStateException("==> Unexpected request: " + incomingMethod + " " + incomingUrl)); } if (networkCallRecord.exception() != null) { throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.exception().get())); } int recordStatusCode = Integer.parseInt(networkCallRecord.response().get("StatusCode")); HttpHeaders headers = new HttpHeaders(); for (Map.Entry<String, String> pair : networkCallRecord.response().entrySet()) { if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) { String rawHeader = pair.getValue(); for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawHeader = rawHeader.replaceAll(rule.getKey(), rule.getValue()); } } headers.put(pair.getKey(), rawHeader); } } String rawBody = networkCallRecord.response().get("Body"); byte[] bytes = null; if (rawBody != null) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawBody = rawBody.replaceAll(rule.getKey(), rule.getValue()); } } String contentType = networkCallRecord.response().get("Content-Type"); if (contentType != null && contentType.equalsIgnoreCase("application/octet-stream")) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); for (String piece : rawBody.substring(1, 
rawBody.length() - 1).split(", ")) { outputStream.write(Byte.parseByte(piece)); } bytes = outputStream.toByteArray(); } else { bytes = rawBody.getBytes(StandardCharsets.UTF_8); } if (bytes.length > 0) { headers.put("Content-Length", String.valueOf(bytes.length)); } } HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes); return Mono.just(response); }
class PlaybackClient implements HttpClient { private final ClientLogger logger = new ClientLogger(PlaybackClient.class); private final AtomicInteger count = new AtomicInteger(0); private final Map<String, String> textReplacementRules; private final RecordedData recordedData; /** * Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link * NetworkCallRecord * * @param recordedData The data to playback. * @param textReplacementRules A set of rules to replace text in network call responses. */ public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) { Objects.requireNonNull(recordedData); this.recordedData = recordedData; this.textReplacementRules = textReplacementRules == null ? new HashMap<>() : textReplacementRules; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(final HttpRequest request) { return Mono.defer(() -> playbackHttpResponse(request)); } /** * {@inheritDoc} */ @Override public HttpClient proxy(Supplier<ProxyOptions> supplier) { return this; } /** * {@inheritDoc} */ @Override public HttpClient wiretap(boolean b) { return this; } /** * {@inheritDoc} */ @Override public HttpClient port(int i) { return this; } private String applyReplacementRule(String text) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { text = text.replaceAll(rule.getKey(), rule.getValue()); } } return text; } private static String removeHost(String url) { UrlBuilder urlBuilder = UrlBuilder.parse(url); if (urlBuilder.query().containsKey("sig")) { urlBuilder.setQueryParameter("sig", "REDACTED"); } return String.format("%s%s", urlBuilder.path(), urlBuilder.queryString()); } }
class PlaybackClient implements HttpClient { private final ClientLogger logger = new ClientLogger(PlaybackClient.class); private final AtomicInteger count = new AtomicInteger(0); private final Map<String, String> textReplacementRules; private final RecordedData recordedData; /** * Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link * NetworkCallRecord * * @param recordedData The data to playback. * @param textReplacementRules A set of rules to replace text in network call responses. */ public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) { Objects.requireNonNull(recordedData); this.recordedData = recordedData; this.textReplacementRules = textReplacementRules == null ? new HashMap<>() : textReplacementRules; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(final HttpRequest request) { return Mono.defer(() -> playbackHttpResponse(request)); } /** * {@inheritDoc} */ @Override public HttpClient proxy(Supplier<ProxyOptions> supplier) { return this; } /** * {@inheritDoc} */ @Override public HttpClient wiretap(boolean b) { return this; } /** * {@inheritDoc} */ @Override public HttpClient port(int i) { return this; } private String applyReplacementRule(String text) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { text = text.replaceAll(rule.getKey(), rule.getValue()); } } return text; } private static String removeHost(String url) { UrlBuilder urlBuilder = UrlBuilder.parse(url); if (urlBuilder.query().containsKey("sig")) { urlBuilder.setQueryParameter("sig", "REDACTED"); } return String.format("%s%s", urlBuilder.path(), urlBuilder.queryString()); } }
While RFC headers are case-insensitive, the data structure (`Map`) we use to store this information in playback isn't. This is another cleanup we should do: either use `HttpHeaders` or a case-insensitive `Map` implementation.
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) { final String incomingUrl = applyReplacementRule(request.url().toString()); final String incomingMethod = request.httpMethod().toString(); final String matchingUrl = removeHost(incomingUrl); NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record -> record.method().equalsIgnoreCase(incomingMethod) && removeHost(record.uri()).equalsIgnoreCase(matchingUrl)); count.incrementAndGet(); if (networkCallRecord == null) { logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl); logger.warning("Records requested: {}.", count); return Mono.error(new IllegalStateException("==> Unexpected request: " + incomingMethod + " " + incomingUrl)); } if (networkCallRecord.exception() != null) { throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.exception().get())); } int recordStatusCode = Integer.parseInt(networkCallRecord.response().get("StatusCode")); HttpHeaders headers = new HttpHeaders(); for (Map.Entry<String, String> pair : networkCallRecord.response().entrySet()) { if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) { String rawHeader = pair.getValue(); for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawHeader = rawHeader.replaceAll(rule.getKey(), rule.getValue()); } } headers.put(pair.getKey(), rawHeader); } } String rawBody = networkCallRecord.response().get("Body"); byte[] bytes = null; if (rawBody != null) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawBody = rawBody.replaceAll(rule.getKey(), rule.getValue()); } } String contentType = networkCallRecord.response().get("Content-Type"); if (contentType != null && contentType.contains("octet-stream")) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); for (String piece : rawBody.substring(1, rawBody.length() - 1).split(", 
")) { outputStream.write(Byte.parseByte(piece)); } bytes = outputStream.toByteArray(); } else { bytes = rawBody.getBytes(StandardCharsets.UTF_8); } if (bytes.length > 0) { headers.put("Content-Length", String.valueOf(bytes.length)); } } HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes); return Mono.just(response); }
if (contentType != null && contentType.contains("octet-stream")) {
/**
 * Looks up and replays the recorded network call matching the incoming request.
 * <p>
 * Matching is done on the HTTP method and the host-less URL (host stripped, SAS "sig" redacted)
 * so recordings are portable across environments. The matched record is removed from
 * {@code recordedData} so it is not replayed twice. Text replacement rules are applied to the
 * recorded headers and body before the response is returned.
 *
 * @param request the incoming request to satisfy from the recording.
 * @return the recorded response, or an error if no matching record exists.
 */
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) {
    final String incomingUrl = applyReplacementRule(request.url().toString());
    final String incomingMethod = request.httpMethod().toString();
    final String matchingUrl = removeHost(incomingUrl);

    // Match on method + host-less URL and consume the record so it cannot be replayed twice.
    NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record ->
        record.method().equalsIgnoreCase(incomingMethod)
            && removeHost(record.uri()).equalsIgnoreCase(matchingUrl));

    count.incrementAndGet();

    if (networkCallRecord == null) {
        logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl);
        logger.warning("Records requested: {}.", count);
        return Mono.error(new IllegalStateException(
            "==> Unexpected request: " + incomingMethod + " " + incomingUrl));
    }

    if (networkCallRecord.exception() != null) {
        // The recorded call ended in an exception; rethrow it exactly as recorded.
        throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.exception().get()));
    }

    int recordStatusCode = Integer.parseInt(networkCallRecord.response().get("StatusCode"));
    HttpHeaders headers = new HttpHeaders();

    for (Map.Entry<String, String> pair : networkCallRecord.response().entrySet()) {
        if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) {
            // Reuse the shared helper rather than duplicating the replacement loop inline.
            headers.put(pair.getKey(), applyReplacementRule(pair.getValue()));
        }
    }

    String rawBody = networkCallRecord.response().get("Body");
    byte[] bytes = null;

    if (rawBody != null) {
        rawBody = applyReplacementRule(rawBody);

        // NOTE(review): exact match means parameterized media types
        // ("application/octet-stream; ...") fall to the text branch — confirm recordings
        // never carry parameters on this header.
        String contentType = networkCallRecord.response().get("Content-Type");
        if (contentType != null && contentType.equalsIgnoreCase("application/octet-stream")) {
            // Binary bodies are recorded as "[b1, b2, ...]"; parse each decimal byte back out.
            ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
            for (String piece : rawBody.substring(1, rawBody.length() - 1).split(", ")) {
                outputStream.write(Byte.parseByte(piece));
            }
            bytes = outputStream.toByteArray();
        } else {
            bytes = rawBody.getBytes(StandardCharsets.UTF_8);
        }

        if (bytes.length > 0) {
            headers.put("Content-Length", String.valueOf(bytes.length));
        }
    }

    HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes);
    return Mono.just(response);
}
class PlaybackClient implements HttpClient { private final ClientLogger logger = new ClientLogger(PlaybackClient.class); private final AtomicInteger count = new AtomicInteger(0); private final Map<String, String> textReplacementRules; private final RecordedData recordedData; /** * Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link * NetworkCallRecord * * @param recordedData The data to playback. * @param textReplacementRules A set of rules to replace text in network call responses. */ public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) { Objects.requireNonNull(recordedData); this.recordedData = recordedData; this.textReplacementRules = textReplacementRules == null ? new HashMap<>() : textReplacementRules; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(final HttpRequest request) { return Mono.defer(() -> playbackHttpResponse(request)); } /** * {@inheritDoc} */ @Override public HttpClient proxy(Supplier<ProxyOptions> supplier) { return this; } /** * {@inheritDoc} */ @Override public HttpClient wiretap(boolean b) { return this; } /** * {@inheritDoc} */ @Override public HttpClient port(int i) { return this; } private String applyReplacementRule(String text) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { text = text.replaceAll(rule.getKey(), rule.getValue()); } } return text; } private static String removeHost(String url) { UrlBuilder urlBuilder = UrlBuilder.parse(url); if (urlBuilder.query().containsKey("sig")) { urlBuilder.setQueryParameter("sig", "REDACTED"); } return String.format("%s%s", urlBuilder.path(), urlBuilder.queryString()); } }
class PlaybackClient implements HttpClient { private final ClientLogger logger = new ClientLogger(PlaybackClient.class); private final AtomicInteger count = new AtomicInteger(0); private final Map<String, String> textReplacementRules; private final RecordedData recordedData; /** * Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link * NetworkCallRecord * * @param recordedData The data to playback. * @param textReplacementRules A set of rules to replace text in network call responses. */ public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) { Objects.requireNonNull(recordedData); this.recordedData = recordedData; this.textReplacementRules = textReplacementRules == null ? new HashMap<>() : textReplacementRules; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(final HttpRequest request) { return Mono.defer(() -> playbackHttpResponse(request)); } /** * {@inheritDoc} */ @Override public HttpClient proxy(Supplier<ProxyOptions> supplier) { return this; } /** * {@inheritDoc} */ @Override public HttpClient wiretap(boolean b) { return this; } /** * {@inheritDoc} */ @Override public HttpClient port(int i) { return this; } private String applyReplacementRule(String text) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { text = text.replaceAll(rule.getKey(), rule.getValue()); } } return text; } private static String removeHost(String url) { UrlBuilder urlBuilder = UrlBuilder.parse(url); if (urlBuilder.query().containsKey("sig")) { urlBuilder.setQueryParameter("sig", "REDACTED"); } return String.format("%s%s", urlBuilder.path(), urlBuilder.queryString()); } }
Stack Overflow has revealed that there is a way to do case-insensitive key comparison — one thing that .NET provides over HashMap. https://stackoverflow.com/a/22336599
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) { final String incomingUrl = applyReplacementRule(request.url().toString()); final String incomingMethod = request.httpMethod().toString(); final String matchingUrl = removeHost(incomingUrl); NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record -> record.method().equalsIgnoreCase(incomingMethod) && removeHost(record.uri()).equalsIgnoreCase(matchingUrl)); count.incrementAndGet(); if (networkCallRecord == null) { logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl); logger.warning("Records requested: {}.", count); return Mono.error(new IllegalStateException("==> Unexpected request: " + incomingMethod + " " + incomingUrl)); } if (networkCallRecord.exception() != null) { throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.exception().get())); } int recordStatusCode = Integer.parseInt(networkCallRecord.response().get("StatusCode")); HttpHeaders headers = new HttpHeaders(); for (Map.Entry<String, String> pair : networkCallRecord.response().entrySet()) { if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) { String rawHeader = pair.getValue(); for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawHeader = rawHeader.replaceAll(rule.getKey(), rule.getValue()); } } headers.put(pair.getKey(), rawHeader); } } String rawBody = networkCallRecord.response().get("Body"); byte[] bytes = null; if (rawBody != null) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawBody = rawBody.replaceAll(rule.getKey(), rule.getValue()); } } String contentType = networkCallRecord.response().get("Content-Type"); if (contentType != null && contentType.contains("octet-stream")) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); for (String piece : rawBody.substring(1, rawBody.length() - 1).split(", 
")) { outputStream.write(Byte.parseByte(piece)); } bytes = outputStream.toByteArray(); } else { bytes = rawBody.getBytes(StandardCharsets.UTF_8); } if (bytes.length > 0) { headers.put("Content-Length", String.valueOf(bytes.length)); } } HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes); return Mono.just(response); }
if (contentType != null && contentType.contains("octet-stream")) {
/**
 * Looks up and replays the recorded network call matching the incoming request.
 * <p>
 * Matching is done on the HTTP method and the host-less URL (host stripped, SAS "sig" redacted)
 * so recordings are portable across environments. The matched record is removed from
 * {@code recordedData} so it is not replayed twice. Text replacement rules are applied to the
 * recorded headers and body before the response is returned.
 *
 * @param request the incoming request to satisfy from the recording.
 * @return the recorded response, or an error if no matching record exists.
 */
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) {
    final String incomingUrl = applyReplacementRule(request.url().toString());
    final String incomingMethod = request.httpMethod().toString();
    final String matchingUrl = removeHost(incomingUrl);

    // Match on method + host-less URL and consume the record so it cannot be replayed twice.
    NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record ->
        record.method().equalsIgnoreCase(incomingMethod)
            && removeHost(record.uri()).equalsIgnoreCase(matchingUrl));

    count.incrementAndGet();

    if (networkCallRecord == null) {
        logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl);
        logger.warning("Records requested: {}.", count);
        return Mono.error(new IllegalStateException(
            "==> Unexpected request: " + incomingMethod + " " + incomingUrl));
    }

    if (networkCallRecord.exception() != null) {
        // The recorded call ended in an exception; rethrow it exactly as recorded.
        throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.exception().get()));
    }

    int recordStatusCode = Integer.parseInt(networkCallRecord.response().get("StatusCode"));
    HttpHeaders headers = new HttpHeaders();

    for (Map.Entry<String, String> pair : networkCallRecord.response().entrySet()) {
        if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) {
            // Reuse the shared helper rather than duplicating the replacement loop inline.
            headers.put(pair.getKey(), applyReplacementRule(pair.getValue()));
        }
    }

    String rawBody = networkCallRecord.response().get("Body");
    byte[] bytes = null;

    if (rawBody != null) {
        rawBody = applyReplacementRule(rawBody);

        // NOTE(review): exact match means parameterized media types
        // ("application/octet-stream; ...") fall to the text branch — confirm recordings
        // never carry parameters on this header.
        String contentType = networkCallRecord.response().get("Content-Type");
        if (contentType != null && contentType.equalsIgnoreCase("application/octet-stream")) {
            // Binary bodies are recorded as "[b1, b2, ...]"; parse each decimal byte back out.
            ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
            for (String piece : rawBody.substring(1, rawBody.length() - 1).split(", ")) {
                outputStream.write(Byte.parseByte(piece));
            }
            bytes = outputStream.toByteArray();
        } else {
            bytes = rawBody.getBytes(StandardCharsets.UTF_8);
        }

        if (bytes.length > 0) {
            headers.put("Content-Length", String.valueOf(bytes.length));
        }
    }

    HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes);
    return Mono.just(response);
}
class PlaybackClient implements HttpClient { private final ClientLogger logger = new ClientLogger(PlaybackClient.class); private final AtomicInteger count = new AtomicInteger(0); private final Map<String, String> textReplacementRules; private final RecordedData recordedData; /** * Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link * NetworkCallRecord * * @param recordedData The data to playback. * @param textReplacementRules A set of rules to replace text in network call responses. */ public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) { Objects.requireNonNull(recordedData); this.recordedData = recordedData; this.textReplacementRules = textReplacementRules == null ? new HashMap<>() : textReplacementRules; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(final HttpRequest request) { return Mono.defer(() -> playbackHttpResponse(request)); } /** * {@inheritDoc} */ @Override public HttpClient proxy(Supplier<ProxyOptions> supplier) { return this; } /** * {@inheritDoc} */ @Override public HttpClient wiretap(boolean b) { return this; } /** * {@inheritDoc} */ @Override public HttpClient port(int i) { return this; } private String applyReplacementRule(String text) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { text = text.replaceAll(rule.getKey(), rule.getValue()); } } return text; } private static String removeHost(String url) { UrlBuilder urlBuilder = UrlBuilder.parse(url); if (urlBuilder.query().containsKey("sig")) { urlBuilder.setQueryParameter("sig", "REDACTED"); } return String.format("%s%s", urlBuilder.path(), urlBuilder.queryString()); } }
class PlaybackClient implements HttpClient { private final ClientLogger logger = new ClientLogger(PlaybackClient.class); private final AtomicInteger count = new AtomicInteger(0); private final Map<String, String> textReplacementRules; private final RecordedData recordedData; /** * Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link * NetworkCallRecord * * @param recordedData The data to playback. * @param textReplacementRules A set of rules to replace text in network call responses. */ public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) { Objects.requireNonNull(recordedData); this.recordedData = recordedData; this.textReplacementRules = textReplacementRules == null ? new HashMap<>() : textReplacementRules; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(final HttpRequest request) { return Mono.defer(() -> playbackHttpResponse(request)); } /** * {@inheritDoc} */ @Override public HttpClient proxy(Supplier<ProxyOptions> supplier) { return this; } /** * {@inheritDoc} */ @Override public HttpClient wiretap(boolean b) { return this; } /** * {@inheritDoc} */ @Override public HttpClient port(int i) { return this; } private String applyReplacementRule(String text) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { text = text.replaceAll(rule.getKey(), rule.getValue()); } } return text; } private static String removeHost(String url) { UrlBuilder urlBuilder = UrlBuilder.parse(url); if (urlBuilder.query().containsKey("sig")) { urlBuilder.setQueryParameter("sig", "REDACTED"); } return String.format("%s%s", urlBuilder.path(), urlBuilder.queryString()); } }
Updated to use the case-insensitive check.
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) { final String incomingUrl = applyReplacementRule(request.url().toString()); final String incomingMethod = request.httpMethod().toString(); final String matchingUrl = removeHost(incomingUrl); NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record -> record.method().equalsIgnoreCase(incomingMethod) && removeHost(record.uri()).equalsIgnoreCase(matchingUrl)); count.incrementAndGet(); if (networkCallRecord == null) { logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl); logger.warning("Records requested: {}.", count); return Mono.error(new IllegalStateException("==> Unexpected request: " + incomingMethod + " " + incomingUrl)); } if (networkCallRecord.exception() != null) { throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.exception().get())); } int recordStatusCode = Integer.parseInt(networkCallRecord.response().get("StatusCode")); HttpHeaders headers = new HttpHeaders(); for (Map.Entry<String, String> pair : networkCallRecord.response().entrySet()) { if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) { String rawHeader = pair.getValue(); for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawHeader = rawHeader.replaceAll(rule.getKey(), rule.getValue()); } } headers.put(pair.getKey(), rawHeader); } } String rawBody = networkCallRecord.response().get("Body"); byte[] bytes = null; if (rawBody != null) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { rawBody = rawBody.replaceAll(rule.getKey(), rule.getValue()); } } String contentType = networkCallRecord.response().get("Content-Type"); if (contentType != null && contentType.contains("octet-stream")) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); for (String piece : rawBody.substring(1, rawBody.length() - 1).split(", 
")) { outputStream.write(Byte.parseByte(piece)); } bytes = outputStream.toByteArray(); } else { bytes = rawBody.getBytes(StandardCharsets.UTF_8); } if (bytes.length > 0) { headers.put("Content-Length", String.valueOf(bytes.length)); } } HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes); return Mono.just(response); }
if (contentType != null && contentType.contains("octet-stream")) {
/**
 * Looks up and replays the recorded network call matching the incoming request.
 * <p>
 * Matching is done on the HTTP method and the host-less URL (host stripped, SAS "sig" redacted)
 * so recordings are portable across environments. The matched record is removed from
 * {@code recordedData} so it is not replayed twice. Text replacement rules are applied to the
 * recorded headers and body before the response is returned.
 *
 * @param request the incoming request to satisfy from the recording.
 * @return the recorded response, or an error if no matching record exists.
 */
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) {
    final String incomingUrl = applyReplacementRule(request.url().toString());
    final String incomingMethod = request.httpMethod().toString();
    final String matchingUrl = removeHost(incomingUrl);

    // Match on method + host-less URL and consume the record so it cannot be replayed twice.
    NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record ->
        record.method().equalsIgnoreCase(incomingMethod)
            && removeHost(record.uri()).equalsIgnoreCase(matchingUrl));

    count.incrementAndGet();

    if (networkCallRecord == null) {
        logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl);
        logger.warning("Records requested: {}.", count);
        return Mono.error(new IllegalStateException(
            "==> Unexpected request: " + incomingMethod + " " + incomingUrl));
    }

    if (networkCallRecord.exception() != null) {
        // The recorded call ended in an exception; rethrow it exactly as recorded.
        throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.exception().get()));
    }

    int recordStatusCode = Integer.parseInt(networkCallRecord.response().get("StatusCode"));
    HttpHeaders headers = new HttpHeaders();

    for (Map.Entry<String, String> pair : networkCallRecord.response().entrySet()) {
        if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) {
            // Reuse the shared helper rather than duplicating the replacement loop inline.
            headers.put(pair.getKey(), applyReplacementRule(pair.getValue()));
        }
    }

    String rawBody = networkCallRecord.response().get("Body");
    byte[] bytes = null;

    if (rawBody != null) {
        rawBody = applyReplacementRule(rawBody);

        // NOTE(review): exact match means parameterized media types
        // ("application/octet-stream; ...") fall to the text branch — confirm recordings
        // never carry parameters on this header.
        String contentType = networkCallRecord.response().get("Content-Type");
        if (contentType != null && contentType.equalsIgnoreCase("application/octet-stream")) {
            // Binary bodies are recorded as "[b1, b2, ...]"; parse each decimal byte back out.
            ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
            for (String piece : rawBody.substring(1, rawBody.length() - 1).split(", ")) {
                outputStream.write(Byte.parseByte(piece));
            }
            bytes = outputStream.toByteArray();
        } else {
            bytes = rawBody.getBytes(StandardCharsets.UTF_8);
        }

        if (bytes.length > 0) {
            headers.put("Content-Length", String.valueOf(bytes.length));
        }
    }

    HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes);
    return Mono.just(response);
}
class PlaybackClient implements HttpClient { private final ClientLogger logger = new ClientLogger(PlaybackClient.class); private final AtomicInteger count = new AtomicInteger(0); private final Map<String, String> textReplacementRules; private final RecordedData recordedData; /** * Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link * NetworkCallRecord * * @param recordedData The data to playback. * @param textReplacementRules A set of rules to replace text in network call responses. */ public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) { Objects.requireNonNull(recordedData); this.recordedData = recordedData; this.textReplacementRules = textReplacementRules == null ? new HashMap<>() : textReplacementRules; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(final HttpRequest request) { return Mono.defer(() -> playbackHttpResponse(request)); } /** * {@inheritDoc} */ @Override public HttpClient proxy(Supplier<ProxyOptions> supplier) { return this; } /** * {@inheritDoc} */ @Override public HttpClient wiretap(boolean b) { return this; } /** * {@inheritDoc} */ @Override public HttpClient port(int i) { return this; } private String applyReplacementRule(String text) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { text = text.replaceAll(rule.getKey(), rule.getValue()); } } return text; } private static String removeHost(String url) { UrlBuilder urlBuilder = UrlBuilder.parse(url); if (urlBuilder.query().containsKey("sig")) { urlBuilder.setQueryParameter("sig", "REDACTED"); } return String.format("%s%s", urlBuilder.path(), urlBuilder.queryString()); } }
class PlaybackClient implements HttpClient { private final ClientLogger logger = new ClientLogger(PlaybackClient.class); private final AtomicInteger count = new AtomicInteger(0); private final Map<String, String> textReplacementRules; private final RecordedData recordedData; /** * Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link * NetworkCallRecord * * @param recordedData The data to playback. * @param textReplacementRules A set of rules to replace text in network call responses. */ public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) { Objects.requireNonNull(recordedData); this.recordedData = recordedData; this.textReplacementRules = textReplacementRules == null ? new HashMap<>() : textReplacementRules; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(final HttpRequest request) { return Mono.defer(() -> playbackHttpResponse(request)); } /** * {@inheritDoc} */ @Override public HttpClient proxy(Supplier<ProxyOptions> supplier) { return this; } /** * {@inheritDoc} */ @Override public HttpClient wiretap(boolean b) { return this; } /** * {@inheritDoc} */ @Override public HttpClient port(int i) { return this; } private String applyReplacementRule(String text) { for (Map.Entry<String, String> rule : textReplacementRules.entrySet()) { if (rule.getValue() != null) { text = text.replaceAll(rule.getKey(), rule.getValue()); } } return text; } private static String removeHost(String url) { UrlBuilder urlBuilder = UrlBuilder.parse(url); if (urlBuilder.query().containsKey("sig")) { urlBuilder.setQueryParameter("sig", "REDACTED"); } return String.format("%s%s", urlBuilder.path(), urlBuilder.queryString()); } }
... or do we just reset the state to assume that things are not balanced each time we stop? ... or do we assume things are balanced and then gradually realize they're not and rebalance them? I don't have a strong opinion — I think either approach works; it's just a matter of how aggressively we pursue work stealing. However, I'd suggest that we avoid throwing away an instance and triggering an allocation for a new one if we just need a state reset.
/**
 * Runs one load-balancing pass: validates the ownership data, determines whether this event
 * processor should claim another partition and, if so, claims exactly one partition — either an
 * unclaimed one or one stolen from the most-loaded owner.
 *
 * @param tuple T1 = partition ownership records keyed by partition id (from the PartitionManager);
 *              T2 = all partition ids of this Event Hub.
 * @return a Mono that completes when the pass is done, or errors if the input data is invalid.
 */
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) {
    Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
    List<String> partitionIds = tuple.getT2();
    if (ImplUtils.isNullOrEmpty(partitionIds)) {
        return Mono.error(new IllegalStateException("There are no partitions in Event Hub " + this.eventHubName));
    }
    if (!isValid(partitionOwnershipMap)) {
        return Mono.error(new IllegalStateException("Invalid partitionOwnership data from PartitionManager"));
    }
    /*
     * Remove all partitions' ownership that have not been modified for a configured period of time. This means
     * that the previous EventProcessor that owned the partition is probably down and the partition is now
     * eligible to be claimed by other EventProcessors.
     */
    Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships(
        partitionOwnershipMap);
    int numberOfPartitions = partitionIds.size();
    if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) {
        /*
         * If the active partition ownership map is empty, this is the first time an event processor is
         * running or all Event Processors are down for this Event Hub, consumer group combination. All
         * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership.
         */
        claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(numberOfPartitions)));
        return Mono.empty();
    }
    /*
     * Create a map of owner id to the list of partitions it owns.
     */
    Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values()
        .stream()
        .collect(
            Collectors.groupingBy(PartitionOwnership::ownerId, mapping(Function.identity(), toList())));
    // Include this processor even if it owns nothing yet, so it is counted as an active processor.
    ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>());
    /*
     * Find the minimum number of partitions every event processor should own when the load is
     * evenly distributed.
     */
    int numberOfActiveEventProcessors = ownerPartitionMap.size();
    int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors;
    /*
     * If the number of partitions in the Event Hub is not evenly divisible by the number of active event
     * processors, a few Event Processors may own 1 more partition than the minimum when the load is balanced.
     * Calculate the number of event processors that can own such an additional partition.
     */
    int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors;
    if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition,
        ownerPartitionMap)) {
        // Load is already balanced; nothing to claim in this pass.
        return Mono.empty();
    }
    if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) {
        // This processor already owns its fair share; let an under-loaded processor claim instead.
        return Mono.empty();
    }
    /*
     * If some partitions are unclaimed, this could be because an event processor is down and
     * its partitions are now available for others to own, or because event processors are just
     * starting up and gradually claiming partitions, or because new partitions were added to the Event Hub.
     * Find any partition that is not actively owned and claim it.
     *
     * OR
     *
     * Find a partition to steal from another event processor. Pick the event processor that owns the highest
     * number of partitions.
     */
    String partitionToClaim = partitionIds.parallelStream()
        .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId))
        .findAny()
        .orElseGet(() -> findPartitionToSteal(ownerPartitionMap));
    claimOwnership(partitionOwnershipMap, partitionToClaim);
    return Mono.empty();
}
return Mono.empty();
/**
 * Performs one load-balancing pass over the supplied (ownership records, partition ids) pair.
 * Each invocation claims at most one new partition for this event processor.
 * <p>
 * The entire pass is wrapped in {@link Mono#fromRunnable(Runnable)} so that all work — including
 * the claimOwnership side effect — is deferred until the returned Mono is subscribed, and failures
 * are propagated through the reactive chain via {@code Exceptions.propagate}.
 *
 * @param tuple T1 = current ownership records keyed by partition id (from the PartitionManager),
 *              T2 = all partition ids in the Event Hub.
 * @return a Mono that runs the balancing pass on subscription.
 */
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) {
    return Mono.fromRunnable(() -> {
        logger.info("Starting load balancer");
        Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
        List<String> partitionIds = tuple.getT2();
        // No partitions means there is nothing to balance; surface as an error through the chain.
        if (ImplUtils.isNullOrEmpty(partitionIds)) {
            throw Exceptions
                .propagate(new IllegalStateException("There are no partitions in Event Hub " + this.eventHubName));
        }
        int numberOfPartitions = partitionIds.size();
        logger.info("Partition manager returned {} ownership records", partitionOwnershipMap.size());
        logger.info("EventHubAsyncClient returned {} partitions", numberOfPartitions);
        // Malformed ownership records (see isValid) indicate a misbehaving PartitionManager.
        if (!isValid(partitionOwnershipMap)) {
            throw Exceptions
                .propagate(new IllegalStateException("Invalid partitionOwnership data from PartitionManager"));
        }
        /*
         * Remove all partitions' ownership that have not been modified for a configured period of time.
         * This means that the previous EventProcessor that owned the partition is probably down and the
         * partition is now eligible to be claimed by other EventProcessors.
         */
        Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships(
            partitionOwnershipMap);
        logger.info("Number of active ownership records {}", activePartitionOwnershipMap.size());
        if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) {
            /*
             * If the active partition ownership map is empty, this is the first time an event processor is
             * running or all Event Processors are down for this Event Hub, consumer group combination. All
             * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership.
             */
            claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(numberOfPartitions)));
            return;
        }
        /*
         * Create a map of owner id and a list of partitions it owns
         */
        Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values()
            .stream()
            .collect(
                Collectors.groupingBy(PartitionOwnership::ownerId, mapping(Function.identity(), toList())));
        // Include this event processor (possibly owning nothing yet) so it participates in the distribution math.
        ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>());
        /*
         * Find the minimum number of partitions every event processor should own when the load is
         * evenly distributed.
         */
        int numberOfActiveEventProcessors = ownerPartitionMap.size();
        logger.info("Number of active event processors {}", ownerPartitionMap.size());
        int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors;
        /*
         * If the number of partitions in Event Hub is not evenly divisible by number of active event processors,
         * a few Event Processors may own 1 additional partition than the minimum when the load is balanced.
         * Calculate the number of event processors that can own additional partition.
         */
        int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors;
        logger.info("Expected min partitions per event processor = {}, expected number of event "
            + "processors with additional partition = {}", minPartitionsPerEventProcessor,
            numberOfEventProcessorsWithAdditionalPartition);
        if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition,
            ownerPartitionMap)) {
            logger.info("Load is balanced");
            return;  // steady state reached; nothing to claim
        }
        if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) {
            logger.info("This event processor owns {} partitions and shouldn't own more",
                ownerPartitionMap.get(ownerId).size());
            return;  // unbalanced, but another processor should pick up the slack
        }
        logger.info(
            "Load is unbalanced and this event processor should own more partitions");
        /*
         * If some partitions are unclaimed, this could be because an event processor is down and
         * it's partitions are now available for others to own or because event processors are just
         * starting up and gradually claiming partitions to own or new partitions were added to Event Hub.
         * Find any partition that is not actively owned and claim it.
         *
         * OR
         *
         * Find a partition to steal from another event processor. Pick the event processor that owns the highest
         * number of partitions.
         */
        String partitionToClaim = partitionIds.parallelStream()
            .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId))
            .findAny()
            .orElseGet(() -> {
                logger.info("No unclaimed partitions, stealing from another event processor");
                return findPartitionToSteal(ownerPartitionMap);
            });
        claimOwnership(partitionOwnershipMap, partitionToClaim);
    });
}
/**
 * Balances partition ownership of an Event Hub / consumer group pair across the active
 * {@link EventProcessor} instances that share a {@link PartitionManager} data store.
 * Ownership is claimed gradually: at most one partition per {@link #loadBalance()} invocation.
 */
class PartitionBasedLoadBalancer {

    private static final Random RANDOM = new Random();
    private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class);
    private final String eventHubName;
    private final String consumerGroupName;
    private final PartitionManager partitionManager;
    private final EventHubAsyncClient eventHubAsyncClient;
    private final String ownerId;
    // Ownership records older than this (seconds) are treated as abandoned by a dead processor.
    private final long inactiveTimeLimitInSeconds;
    private final PartitionPumpManager partitionPumpManager;

    /**
     * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group.
     *
     * @param partitionManager The partition manager that this load balancer will use to read/update ownership details.
     * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events.
     * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with.
     * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with.
     * @param ownerId The owner identifier for the {@link EventProcessor} this load balancer is associated with.
     * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before
     * assuming the owner of the partition is inactive.
     * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions
     * that this {@link EventProcessor} is processing.
     */
    public PartitionBasedLoadBalancer(final PartitionManager partitionManager,
        final EventHubAsyncClient eventHubAsyncClient, final String eventHubName,
        final String consumerGroupName, final String ownerId,
        final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager) {
        this.partitionManager = partitionManager;
        this.eventHubAsyncClient = eventHubAsyncClient;
        this.eventHubName = eventHubName;
        this.consumerGroupName = consumerGroupName;
        this.ownerId = ownerId;
        this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds;
        this.partitionPumpManager = partitionPumpManager;
    }

    /**
     * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link
     * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning <b>at
     * most one</b> new partition.
     * <p>
     * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active
     * EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition,
     * this algorithm converges gradually towards a steady state.
     * </p>
     * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an
     * {@link EventHubAsyncConsumer} for processing events from that partition.
     */
    public void loadBalance() {
        /*
         * Retrieve current partition ownership details from the datastore.
         */
        final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager
            .listOwnership(eventHubName, consumerGroupName)
            .timeout(Duration.ofSeconds(1))
            .collectMap(PartitionOwnership::partitionId, Function.identity());
        /*
         * Retrieve the list of partition ids from the Event Hub.
         */
        final Mono<List<String>> partitionsMono = eventHubAsyncClient
            .getPartitionIds()
            .timeout(Duration.ofSeconds(1))
            .collectList();
        // NOTE(review): loadBalance(Tuple2) returns a Mono, but map() never subscribes to it — any
        // Mono.error it returns is silently discarded. flatMap would propagate it; confirm intent.
        Mono.zip(partitionOwnershipMono, partitionsMono)
            .map(this::loadBalance)
            .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage()))
            .subscribe();
    }

    /*
     * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the
     * current Event Processor should take on the responsibility of processing more partitions.
     */

    /*
     * Check if partition ownership data is valid before proceeding with load balancing.
     * A record is invalid when any required field is null or when it belongs to a different
     * Event Hub / consumer group than the one this balancer manages.
     */
    private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) {
        return partitionOwnershipMap.values()
            .stream()
            .noneMatch(partitionOwnership -> {
                return partitionOwnership.ownerId() == null
                    || partitionOwnership.eventHubName() == null
                    || !partitionOwnership.eventHubName().equals(this.eventHubName)
                    || partitionOwnership.consumerGroupName() == null
                    || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName)
                    || partitionOwnership.partitionId() == null
                    || partitionOwnership.lastModifiedTime() == null
                    || partitionOwnership.eTag() == null;
            });
    }

    /*
     * Find the event processor that owns the maximum number of partitions and steal a random partition
     * from it. The .get() on the Optional is safe here: callers only invoke this after adding this
     * processor's own entry, so the map is never empty.
     */
    private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        List<PartitionOwnership> maxList = ownerPartitionMap.values()
            .stream()
            .max(Comparator.comparingInt(List::size))
            .get();
        return maxList.get(RANDOM.nextInt(maxList.size())).partitionId();
    }

    /*
     * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor}
     * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional
     * partition.
     */
    private boolean isLoadBalanced(final int minPartitionsPerEventProcessor,
        final int numberOfEventProcessorsWithAdditionalPartition,
        final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        int count = 0;
        for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) {
            int numberOfPartitions = partitionOwnership.size();
            if (numberOfPartitions < minPartitionsPerEventProcessor
                || numberOfPartitions > minPartitionsPerEventProcessor + 1) {
                return false;
            }
            if (numberOfPartitions == minPartitionsPerEventProcessor + 1) {
                count++;
            }
        }
        return count == numberOfEventProcessorsWithAdditionalPartition;
    }

    /*
     * This method is called after determining that the load is not balanced. This method will evaluate
     * if the current event processor should own more partitions. Specifically, this method returns true if the
     * current event processor owns less than the minimum number of partitions or if it owns the minimum number
     * and no other event processor owns lesser number of partitions than this event processor.
     */
    private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor,
        final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size();
        int leastPartitionsOwnedByAnyEventProcessor =
            ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size();
        return numberOfPartitionsOwned < minPartitionsPerEventProcessor
            || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor;
    }

    /*
     * This method will create a new map of partition id and PartitionOwnership containing only those partitions
     * that are actively owned. All entries in the original map returned by PartitionManager that haven't been
     * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by
     * dead event processors. These will not be included in the map returned by this method.
     */
    private Map<String, PartitionOwnership> removeInactivePartitionOwnerships(
        final Map<String, PartitionOwnership> partitionOwnershipMap) {
        return partitionOwnershipMap
            .entrySet()
            .stream()
            .filter(entry -> {
                return System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS
                    .toMillis(inactiveTimeLimitInSeconds);
            }).collect(Collectors.toMap(Entry::getKey, Entry::getValue));
    }

    /*
     * Attempts to claim the given partition through the PartitionManager; on success the partition
     * pump manager starts consuming from it. A lost claim race surfaces via doOnError (logged only).
     */
    private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap,
        final String partitionIdToClaim) {
        PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap,
            partitionIdToClaim);
        partitionManager
            .claimOwnership(ownershipRequest)
            .timeout(Duration.ofSeconds(1))
            .doOnError(ex -> logger
                .warning("Failed to claim ownership of partition {} - {}", ownershipRequest.partitionId(),
                    ex.getMessage(), ex))
            .subscribe(partitionPumpManager::startPartitionPump);
    }

    /*
     * Builds the ownership record to submit, carrying forward the previous owner's checkpoint
     * (sequence number, offset) and eTag when the partition was owned before.
     */
    private PartitionOwnership createPartitionOwnershipRequest(
        final Map<String, PartitionOwnership> partitionOwnershipMap,
        final String partitionIdToClaim) {
        PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim);
        PartitionOwnership partitionOwnershipRequest = new PartitionOwnership()
            .ownerId(this.ownerId)
            .partitionId(partitionIdToClaim)
            .consumerGroupName(this.consumerGroupName)
            .eventHubName(this.eventHubName)
            .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber())
            .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset())
            .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag())
            .ownerLevel(0L);
        return partitionOwnershipRequest;
    }
}
/**
 * Balances partition ownership of an Event Hub / consumer group pair across the active
 * {@link EventProcessor} instances that share a {@link PartitionManager} data store.
 * Ownership is claimed gradually: at most one partition per {@link #loadBalance()} invocation.
 */
class PartitionBasedLoadBalancer {

    private static final Random RANDOM = new Random();
    private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class);
    private final String eventHubName;
    private final String consumerGroupName;
    private final PartitionManager partitionManager;
    private final EventHubAsyncClient eventHubAsyncClient;
    private final String ownerId;
    // Ownership records older than this (seconds) are treated as abandoned by a dead processor.
    private final long inactiveTimeLimitInSeconds;
    private final PartitionPumpManager partitionPumpManager;

    /**
     * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group.
     *
     * @param partitionManager The partition manager that this load balancer will use to read/update ownership details.
     * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events.
     * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with.
     * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with.
     * @param ownerId The identifier of the {@link EventProcessor} that owns this load balancer.
     * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before
     * assuming the owner of the partition is inactive.
     * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions
     * that this {@link EventProcessor} is processing.
     */
    public PartitionBasedLoadBalancer(final PartitionManager partitionManager,
        final EventHubAsyncClient eventHubAsyncClient, final String eventHubName,
        final String consumerGroupName, final String ownerId,
        final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager) {
        this.partitionManager = partitionManager;
        this.eventHubAsyncClient = eventHubAsyncClient;
        this.eventHubName = eventHubName;
        this.consumerGroupName = consumerGroupName;
        this.ownerId = ownerId;
        this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds;
        this.partitionPumpManager = partitionPumpManager;
    }

    /**
     * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link
     * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning <b>at
     * most one</b> new partition.
     * <p>
     * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active
     * EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition,
     * this algorithm converges gradually towards a steady state.
     * </p>
     * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an
     * {@link EventHubAsyncConsumer} for processing events from that partition.
     */
    public void loadBalance() {
        /*
         * Retrieve current partition ownership details from the datastore.
         */
        final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager
            .listOwnership(eventHubName, consumerGroupName)
            .timeout(Duration.ofSeconds(1))
            .collectMap(PartitionOwnership::partitionId, Function.identity());
        /*
         * Retrieve the list of partition ids from the Event Hub.
         */
        final Mono<List<String>> partitionsMono = eventHubAsyncClient
            .getPartitionIds()
            .timeout(Duration.ofSeconds(1))
            .collectList();
        // flatMap (not map) so the inner Mono from loadBalance(Tuple2) is actually subscribed to
        // and its errors reach doOnError.
        Mono.zip(partitionOwnershipMono, partitionsMono)
            .flatMap(this::loadBalance)
            .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage()))
            .subscribe();
    }

    /*
     * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the
     * current Event Processor should take on the responsibility of processing more partitions.
     */

    /*
     * Check if partition ownership data is valid before proceeding with load balancing.
     * A record is invalid when any required field is null or when it belongs to a different
     * Event Hub / consumer group than the one this balancer manages.
     */
    private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) {
        return partitionOwnershipMap.values()
            .stream()
            .noneMatch(partitionOwnership -> {
                return partitionOwnership.eventHubName() == null
                    || !partitionOwnership.eventHubName().equals(this.eventHubName)
                    || partitionOwnership.consumerGroupName() == null
                    || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName)
                    || partitionOwnership.partitionId() == null
                    || partitionOwnership.lastModifiedTime() == null
                    || partitionOwnership.eTag() == null;
            });
    }

    /*
     * Find the event processor that owns the maximum number of partitions and steal a random partition
     * from it. The .get() on the Optional is safe here: callers only invoke this after adding this
     * processor's own entry, so the map is never empty.
     */
    private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet()
            .stream()
            .max(Comparator.comparingInt(entry -> entry.getValue().size()))
            .get();
        int numberOfPartitions = ownerWithMaxPartitions.getValue().size();
        logger.info("Owner id {} owns {} partitions, stealing a partition from it",
            ownerWithMaxPartitions.getKey(), numberOfPartitions);
        return ownerWithMaxPartitions.getValue().get(RANDOM.nextInt(numberOfPartitions)).partitionId();
    }

    /*
     * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor}
     * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional
     * partition.
     */
    private boolean isLoadBalanced(final int minPartitionsPerEventProcessor,
        final int numberOfEventProcessorsWithAdditionalPartition,
        final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        int count = 0;
        for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) {
            int numberOfPartitions = partitionOwnership.size();
            if (numberOfPartitions < minPartitionsPerEventProcessor
                || numberOfPartitions > minPartitionsPerEventProcessor + 1) {
                return false;
            }
            if (numberOfPartitions == minPartitionsPerEventProcessor + 1) {
                count++;
            }
        }
        return count == numberOfEventProcessorsWithAdditionalPartition;
    }

    /*
     * This method is called after determining that the load is not balanced. This method will evaluate
     * if the current event processor should own more partitions. Specifically, this method returns true if the
     * current event processor owns less than the minimum number of partitions or if it owns the minimum number
     * and no other event processor owns lesser number of partitions than this event processor.
     */
    private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor,
        final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size();
        int leastPartitionsOwnedByAnyEventProcessor =
            ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size();
        return numberOfPartitionsOwned < minPartitionsPerEventProcessor
            || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor;
    }

    /*
     * This method will create a new map of partition id and PartitionOwnership containing only those partitions
     * that are actively owned. All entries in the original map returned by PartitionManager that haven't been
     * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by
     * dead event processors. These will not be included in the map returned by this method.
     * Records with a null/empty ownerId are also excluded (treated as unowned).
     */
    private Map<String, PartitionOwnership> removeInactivePartitionOwnerships(
        final Map<String, PartitionOwnership> partitionOwnershipMap) {
        return partitionOwnershipMap
            .entrySet()
            .stream()
            .filter(entry -> {
                return (System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS
                    .toMillis(inactiveTimeLimitInSeconds)) && !ImplUtils.isNullOrEmpty(entry.getValue().ownerId());
            }).collect(Collectors.toMap(Entry::getKey, Entry::getValue));
    }

    /*
     * Attempts to claim the given partition through the PartitionManager; on success the partition
     * pump manager starts consuming from it. A lost claim race surfaces via doOnError (logged only).
     */
    private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap,
        final String partitionIdToClaim) {
        logger.info("Attempting to claim ownership of partition {}", partitionIdToClaim);
        PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap,
            partitionIdToClaim);
        partitionManager
            .claimOwnership(ownershipRequest)
            .timeout(Duration.ofSeconds(1))
            .doOnNext(partitionOwnership -> logger.info("Successfully claimed ownership of partition {}",
                partitionOwnership.partitionId()))
            .doOnError(ex -> logger
                .warning("Failed to claim ownership of partition {} - {}",
                    ownershipRequest.partitionId(), ex.getMessage(), ex))
            .subscribe(partitionPumpManager::startPartitionPump);
    }

    /*
     * Builds the ownership record to submit, carrying forward the previous owner's checkpoint
     * (sequence number, offset) and eTag when the partition was owned before.
     */
    private PartitionOwnership createPartitionOwnershipRequest(
        final Map<String, PartitionOwnership> partitionOwnershipMap,
        final String partitionIdToClaim) {
        PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim);
        PartitionOwnership partitionOwnershipRequest = new PartitionOwnership()
            .ownerId(this.ownerId)
            .partitionId(partitionIdToClaim)
            .consumerGroupName(this.consumerGroupName)
            .eventHubName(this.eventHubName)
            .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber())
            .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset())
            .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag())
            .ownerLevel(0L);
        return partitionOwnershipRequest;
    }
}
"Ideally" or "under normal circumstances"... What I'm asking is whether this is something that would potentially occur and is expected or is an indicator that something is wrong. The comment implies the former, but the implementation seems to imply the latter.
/**
 * Performs one load-balancing pass over the supplied (ownership records, partition ids) pair.
 * Each invocation claims at most one new partition for this event processor.
 *
 * @param tuple T1 = current ownership records keyed by partition id (from the PartitionManager),
 *              T2 = all partition ids in the Event Hub.
 * @return {@code Mono.empty()} on a completed pass, or {@code Mono.error(...)} when the input data
 *     is empty or invalid.
 *     NOTE(review): side effects (claimOwnership) execute eagerly when this method is invoked, not
 *     when the returned Mono is subscribed — confirm the call site subscribes to the result.
 */
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) {
    Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
    List<String> partitionIds = tuple.getT2();
    // No partitions means there is nothing to balance; surface as an error.
    if (ImplUtils.isNullOrEmpty(partitionIds)) {
        return Mono.error(new IllegalStateException("There are no partitions in Event Hub " + this.eventHubName));
    }
    // Malformed ownership records (see isValid) indicate a misbehaving PartitionManager.
    if (!isValid(partitionOwnershipMap)) {
        return Mono.error(new IllegalStateException("Invalid partitionOwnership data from PartitionManager"));
    }
    /*
     * Remove all partitions' ownerships that have not been modified for a long time. This means that the previous
     * event processor that owned the partition is probably down and the partition is now eligible to be
     * claimed by other event processors.
     */
    Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships(
        partitionOwnershipMap);
    if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) {
        /*
         * If the active partition ownership map is empty, this is the first time an event processor is
         * running or all Event Processors are down for this Event Hub, consumer group combination. All
         * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership.
         */
        claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(partitionIds.size())));
        return Mono.empty();
    }
    /*
     * Create a map of owner id and a list of partitions it owns
     */
    Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values()
        .stream()
        .collect(
            Collectors.groupingBy(PartitionOwnership::ownerId, mapping(Function.identity(), toList())));
    // Include this event processor (possibly owning nothing yet) so it participates in the distribution math.
    ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>());
    /*
     * Find the minimum number of partitions every event processor should own when the load is
     * evenly distributed.
     */
    int minPartitionsPerEventProcessor = partitionIds.size() / ownerPartitionMap.size();
    /*
     * If the number of partitions in Event Hub is not evenly divisible by number of active event processors,
     * a few Event Processors may own 1 additional partition than the minimum when the load is balanced. Calculate
     * the number of event processors that can own additional partition.
     */
    int numberOfEventProcessorsWithAdditionalPartition = partitionIds.size() % ownerPartitionMap.size();
    if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition,
        ownerPartitionMap)) {
        return Mono.empty();  // steady state reached; nothing to claim
    }
    if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) {
        return Mono.empty();  // unbalanced, but another processor should pick up the slack
    }
    /*
     * If some partitions are unclaimed, this could be because an event processor is down and
     * it's partitions are now available for others to own or because event processors are just
     * starting up and gradually claiming partitions to own or new partitions were added to Event Hub.
     * Find any partition that is not actively owned and claim it.
     *
     * OR
     *
     * Find a partition to steal from another event processor. Pick the event processor that owns the highest
     * number of partitions.
     */
    String partitionToClaim = partitionIds.parallelStream()
        .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId))
        .findAny()
        .orElseGet(() -> findPartitionToSteal(ownerPartitionMap));
    claimOwnership(partitionOwnershipMap, partitionToClaim);
    return Mono.empty();
}
/**
 * Performs one load-balancing pass over the supplied (ownership records, partition ids) pair.
 * Each invocation claims at most one new partition for this event processor.
 * <p>
 * The entire pass is wrapped in {@link Mono#fromRunnable(Runnable)} so that all work — including
 * the claimOwnership side effect — is deferred until the returned Mono is subscribed, and failures
 * are propagated through the reactive chain via {@code Exceptions.propagate}.
 *
 * @param tuple T1 = current ownership records keyed by partition id (from the PartitionManager),
 *              T2 = all partition ids in the Event Hub.
 * @return a Mono that runs the balancing pass on subscription.
 */
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) {
    return Mono.fromRunnable(() -> {
        logger.info("Starting load balancer");
        Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
        List<String> partitionIds = tuple.getT2();
        // No partitions means there is nothing to balance; surface as an error through the chain.
        if (ImplUtils.isNullOrEmpty(partitionIds)) {
            throw Exceptions
                .propagate(new IllegalStateException("There are no partitions in Event Hub " + this.eventHubName));
        }
        int numberOfPartitions = partitionIds.size();
        logger.info("Partition manager returned {} ownership records", partitionOwnershipMap.size());
        logger.info("EventHubAsyncClient returned {} partitions", numberOfPartitions);
        // Malformed ownership records (see isValid) indicate a misbehaving PartitionManager.
        if (!isValid(partitionOwnershipMap)) {
            throw Exceptions
                .propagate(new IllegalStateException("Invalid partitionOwnership data from PartitionManager"));
        }
        /*
         * Remove all partitions' ownership that have not been modified for a configured period of time.
         * This means that the previous EventProcessor that owned the partition is probably down and the
         * partition is now eligible to be claimed by other EventProcessors.
         */
        Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships(
            partitionOwnershipMap);
        logger.info("Number of active ownership records {}", activePartitionOwnershipMap.size());
        if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) {
            /*
             * If the active partition ownership map is empty, this is the first time an event processor is
             * running or all Event Processors are down for this Event Hub, consumer group combination. All
             * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership.
             */
            claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(numberOfPartitions)));
            return;
        }
        /*
         * Create a map of owner id and a list of partitions it owns
         */
        Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values()
            .stream()
            .collect(
                Collectors.groupingBy(PartitionOwnership::ownerId, mapping(Function.identity(), toList())));
        // Include this event processor (possibly owning nothing yet) so it participates in the distribution math.
        ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>());
        /*
         * Find the minimum number of partitions every event processor should own when the load is
         * evenly distributed.
         */
        int numberOfActiveEventProcessors = ownerPartitionMap.size();
        logger.info("Number of active event processors {}", ownerPartitionMap.size());
        int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors;
        /*
         * If the number of partitions in Event Hub is not evenly divisible by number of active event processors,
         * a few Event Processors may own 1 additional partition than the minimum when the load is balanced.
         * Calculate the number of event processors that can own additional partition.
         */
        int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors;
        logger.info("Expected min partitions per event processor = {}, expected number of event "
            + "processors with additional partition = {}", minPartitionsPerEventProcessor,
            numberOfEventProcessorsWithAdditionalPartition);
        if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition,
            ownerPartitionMap)) {
            logger.info("Load is balanced");
            return;  // steady state reached; nothing to claim
        }
        if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) {
            logger.info("This event processor owns {} partitions and shouldn't own more",
                ownerPartitionMap.get(ownerId).size());
            return;  // unbalanced, but another processor should pick up the slack
        }
        logger.info(
            "Load is unbalanced and this event processor should own more partitions");
        /*
         * If some partitions are unclaimed, this could be because an event processor is down and
         * it's partitions are now available for others to own or because event processors are just
         * starting up and gradually claiming partitions to own or new partitions were added to Event Hub.
         * Find any partition that is not actively owned and claim it.
         *
         * OR
         *
         * Find a partition to steal from another event processor. Pick the event processor that owns the highest
         * number of partitions.
         */
        String partitionToClaim = partitionIds.parallelStream()
            .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId))
            .findAny()
            .orElseGet(() -> {
                logger.info("No unclaimed partitions, stealing from another event processor");
                return findPartitionToSteal(ownerPartitionMap);
            });
        claimOwnership(partitionOwnershipMap, partitionToClaim);
    });
}
class PartitionBasedLoadBalancer { private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class); private final String eventHubName; private final String consumerGroupName; private final PartitionManager partitionManager; private final EventHubAsyncClient eventHubAsyncClient; private final String ownerId; private final long inactiveTimeLimitInSeconds; private final PartitionPumpManager partitionPumpManager; /** * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group. * * @param partitionManager The partition manager that this load balancer will use to read/update ownership details. * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events. * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with. * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with. * @param ownerId The owner identifier for the {@link EventProcessor} this load balancer is associated with. * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before * assuming the owner of the partition is inactive. * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions * that this {@link EventProcessor} is processing. 
*/ public PartitionBasedLoadBalancer(final PartitionManager partitionManager, final EventHubAsyncClient eventHubAsyncClient, final String eventHubName, final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager) { this.partitionManager = partitionManager; this.eventHubAsyncClient = eventHubAsyncClient; this.eventHubName = eventHubName; this.consumerGroupName = consumerGroupName; this.ownerId = ownerId; this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds; this.partitionPumpManager = partitionPumpManager; } /** * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning <b>at * most one</b> new partition. * <p> * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active * EventProcessor.Given that each invocation to this method results in ownership claim of at most one partition, * this algorithm converges gradually towards a steady state. * </p> * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an * {@link EventHubAsyncConsumer} for processing events from that partition. */ public void loadBalance() { /* * Retrieve current partition ownership details from the datastore. */ final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager .listOwnership(eventHubName, consumerGroupName) .timeout(Duration.ofSeconds(1)) .collectMap(PartitionOwnership::partitionId, Function.identity()); /* * Retrieve the list of partition ids from the Event Hub. 
*/ final Mono<List<String>> partitionsMono = eventHubAsyncClient .getPartitionIds() .timeout(Duration.ofSeconds(1)) .collectList(); Mono.zip(partitionOwnershipMono, partitionsMono) .map(this::loadBalance) .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage())) .subscribe(); } /* * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the * current Event Processor should take on the responsibility of processing more partitions. */ /* * Check if partition ownership data is valid before proceeding with load balancing. */ private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap.values() .stream() .noneMatch(partitionOwnership -> { return partitionOwnership.ownerId() == null || partitionOwnership.eventHubName() == null || !partitionOwnership.eventHubName().equals(this.eventHubName) || partitionOwnership.consumerGroupName() == null || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName) || partitionOwnership.partitionId() == null || partitionOwnership.lastModifiedTime() == null || partitionOwnership.eTag() == null; }); } /* * Find the event processor that owns the maximum number of partitions and steal a random partition * from it. */ private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) { List<PartitionOwnership> maxList = ownerPartitionMap.values() .stream() .max(Comparator.comparingInt(List::size)) .get(); return maxList.get(RANDOM.nextInt(maxList.size())).partitionId(); } /* * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor} * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional * partition. 
*/ private boolean isLoadBalanced(final int minPartitionsPerEventProcessor, final int numberOfEventProcessorsWithAdditionalPartition, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { if (ownerPartitionMap.values() .stream() .noneMatch(ownershipList -> { return ownershipList.size() < minPartitionsPerEventProcessor || ownershipList.size() > minPartitionsPerEventProcessor + 1; })) { long count = ownerPartitionMap.values() .stream() .filter(ownershipList -> ownershipList.size() == minPartitionsPerEventProcessor + 1) .count(); return count == numberOfEventProcessorsWithAdditionalPartition; } return false; } /* * This method is called after determining that the load is not balanced. This method will evaluate * if the current event processor should own more partitions. Specifically, this method returns true if the * current event processor owns less than the minimum number of partitions or if it owns the minimum number * and no other event processor owns lesser number of partitions than this event processor. */ private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size(); int leastPartitionsOwnedByAnyEventProcessor = ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size(); if (numberOfPartitionsOwned < minPartitionsPerEventProcessor || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor) { return true; } return false; } /* * This method will create a new map of partition id and PartitionOwnership containing only those partitions * that are actively owned. All entries in the original map returned by PartitionManager that haven't been * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by * dead event processors. These will not be included in the map returned by this method. 
*/ private Map<String, PartitionOwnership> removeInactivePartitionOwnerships( final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap .entrySet() .stream() .filter(entry -> { return System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS .toMillis(inactiveTimeLimitInSeconds); }).collect(Collectors.toMap(Entry::getKey, Entry::getValue)); } private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap, partitionIdToClaim); partitionManager .claimOwnership(ownershipRequest) .timeout(Duration.ofSeconds(1)) .doOnError(ex -> logger .warning("Failed to claim ownership of partition {} - {}", ownershipRequest.partitionId(), ex.getMessage(), ex)) .subscribe(partitionPumpManager::startPartitionPump); } private PartitionOwnership createPartitionOwnershipRequest( final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim); PartitionOwnership partitionOwnershipRequest = new PartitionOwnership() .ownerId(this.ownerId) .partitionId(partitionIdToClaim) .consumerGroupName(this.consumerGroupName) .eventHubName(this.eventHubName) .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber()) .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset()) .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag()) .ownerLevel(0L); return partitionOwnershipRequest; } }
class PartitionBasedLoadBalancer { private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class); private final String eventHubName; private final String consumerGroupName; private final PartitionManager partitionManager; private final EventHubAsyncClient eventHubAsyncClient; private final String ownerId; private final long inactiveTimeLimitInSeconds; private final PartitionPumpManager partitionPumpManager; /** * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group. * * @param partitionManager The partition manager that this load balancer will use to read/update ownership details. * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events. * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with. * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with. * @param ownerId The identifier of the {@link EventProcessor} that owns this load balancer. * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before * assuming the owner of the partition is inactive. * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions * that this {@link EventProcessor} is processing. 
*/ public PartitionBasedLoadBalancer(final PartitionManager partitionManager, final EventHubAsyncClient eventHubAsyncClient, final String eventHubName, final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager) { this.partitionManager = partitionManager; this.eventHubAsyncClient = eventHubAsyncClient; this.eventHubName = eventHubName; this.consumerGroupName = consumerGroupName; this.ownerId = ownerId; this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds; this.partitionPumpManager = partitionPumpManager; } /** * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning <b>at * most one</b> new partition. * <p> * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active * EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition, * this algorithm converges gradually towards a steady state. * </p> * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an * {@link EventHubAsyncConsumer} for processing events from that partition. */ public void loadBalance() { /* * Retrieve current partition ownership details from the datastore. */ final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager .listOwnership(eventHubName, consumerGroupName) .timeout(Duration.ofSeconds(1)) .collectMap(PartitionOwnership::partitionId, Function.identity()); /* * Retrieve the list of partition ids from the Event Hub. 
*/ final Mono<List<String>> partitionsMono = eventHubAsyncClient .getPartitionIds() .timeout(Duration.ofSeconds(1)) .collectList(); Mono.zip(partitionOwnershipMono, partitionsMono) .flatMap(this::loadBalance) .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage())) .subscribe(); } /* * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the * current Event Processor should take on the responsibility of processing more partitions. */ /* * Check if partition ownership data is valid before proceeding with load balancing. */ private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap.values() .stream() .noneMatch(partitionOwnership -> { return partitionOwnership.eventHubName() == null || !partitionOwnership.eventHubName().equals(this.eventHubName) || partitionOwnership.consumerGroupName() == null || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName) || partitionOwnership.partitionId() == null || partitionOwnership.lastModifiedTime() == null || partitionOwnership.eTag() == null; }); } /* * Find the event processor that owns the maximum number of partitions and steal a random partition * from it. 
*/ private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) { Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet() .stream() .max(Comparator.comparingInt(entry -> entry.getValue().size())) .get(); int numberOfPartitions = ownerWithMaxPartitions.getValue().size(); logger.info("Owner id {} owns {} partitions, stealing a partition from it", ownerWithMaxPartitions.getKey(), numberOfPartitions); return ownerWithMaxPartitions.getValue().get(RANDOM.nextInt(numberOfPartitions)).partitionId(); } /* * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor} * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional * partition. */ private boolean isLoadBalanced(final int minPartitionsPerEventProcessor, final int numberOfEventProcessorsWithAdditionalPartition, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int count = 0; for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) { int numberOfPartitions = partitionOwnership.size(); if (numberOfPartitions < minPartitionsPerEventProcessor || numberOfPartitions > minPartitionsPerEventProcessor + 1) { return false; } if (numberOfPartitions == minPartitionsPerEventProcessor + 1) { count++; } } return count == numberOfEventProcessorsWithAdditionalPartition; } /* * This method is called after determining that the load is not balanced. This method will evaluate * if the current event processor should own more partitions. Specifically, this method returns true if the * current event processor owns less than the minimum number of partitions or if it owns the minimum number * and no other event processor owns lesser number of partitions than this event processor. 
*/ private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size(); int leastPartitionsOwnedByAnyEventProcessor = ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size(); return numberOfPartitionsOwned < minPartitionsPerEventProcessor || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor; } /* * This method will create a new map of partition id and PartitionOwnership containing only those partitions * that are actively owned. All entries in the original map returned by PartitionManager that haven't been * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by * dead event processors. These will not be included in the map returned by this method. */ private Map<String, PartitionOwnership> removeInactivePartitionOwnerships( final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap .entrySet() .stream() .filter(entry -> { return (System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS .toMillis(inactiveTimeLimitInSeconds)) && !ImplUtils.isNullOrEmpty(entry.getValue().ownerId()); }).collect(Collectors.toMap(Entry::getKey, Entry::getValue)); } private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { logger.info("Attempting to claim ownership of partition {}", partitionIdToClaim); PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap, partitionIdToClaim); partitionManager .claimOwnership(ownershipRequest) .timeout(Duration.ofSeconds(1)) .doOnNext(partitionOwnership -> logger.info("Successfully claimed ownership of partition {}", partitionOwnership.partitionId())) .doOnError(ex -> logger .warning("Failed to claim ownership of partition {} - {}", 
ownershipRequest.partitionId(), ex.getMessage(), ex)) .subscribe(partitionPumpManager::startPartitionPump); } private PartitionOwnership createPartitionOwnershipRequest( final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim); PartitionOwnership partitionOwnershipRequest = new PartitionOwnership() .ownerId(this.ownerId) .partitionId(partitionIdToClaim) .consumerGroupName(this.consumerGroupName) .eventHubName(this.eventHubName) .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber()) .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset()) .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag()) .ownerLevel(0L); return partitionOwnershipRequest; } }
Assuming this is reading the actual Event Hub metadata, this is impossible and would indicate a service failure of some sort. Based on that, I think the comment is the artifact that is out of sync here.
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) { Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1(); List<String> partitionIds = tuple.getT2(); if (ImplUtils.isNullOrEmpty(partitionIds)) { return Mono.error(new IllegalStateException("There are no partitions in Event Hub " + this.eventHubName)); } if (!isValid(partitionOwnershipMap)) { return Mono.error(new IllegalStateException("Invalid partitionOwnership data from PartitionManager")); } /* * Remove all partitions ownerships that have not be modified for a long time. This means that the previous * event processor that owned the partition is probably down and the partition is now eligible to be * claimed by other event processors. */ Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships( partitionOwnershipMap); if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) { /* * If the active partition ownership map is empty, this is the first time an event processor is * running or all Event Processors are down for this Event Hub, consumer group combination. All * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership. */ claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(partitionIds.size()))); return Mono.empty(); } /* * Create a map of owner id and a list of partitions it owns */ Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values() .stream() .collect( Collectors.groupingBy(PartitionOwnership::ownerId, mapping(Function.identity(), toList()))); ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>()); /* * Find the minimum number of partitions every event processor should own when the load is * evenly distributed. 
*/ int minPartitionsPerEventProcessor = partitionIds.size() / ownerPartitionMap.size(); /* * If the number of partitions in Event Hub is not evenly divisible by number of active event processors, * a few Event Processors may own 1 additional partition than the minimum when the load is balanced. Calculate * the number of event processors that can own additional partition. */ int numberOfEventProcessorsWithAdditionalPartition = partitionIds.size() % ownerPartitionMap.size(); if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition, ownerPartitionMap)) { return Mono.empty(); } if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) { return Mono.empty(); } /* * If some partitions are unclaimed, this could be because an event processor is down and * it's partitions are now available for others to own or because event processors are just * starting up and gradually claiming partitions to own or new partitions were added to Event Hub. * Find any partition that is not actively owned and claim it. * * OR * * Find a partition to steal from another event processor. Pick the event processor that has owns the highest * number of partitions. */ String partitionToClaim = partitionIds.parallelStream() .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId)) .findAny() .orElseGet(() -> findPartitionToSteal(ownerPartitionMap)); claimOwnership(partitionOwnershipMap, partitionToClaim); return Mono.empty(); }
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) { return Mono.fromRunnable(() -> { logger.info("Starting load balancer"); Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1(); List<String> partitionIds = tuple.getT2(); if (ImplUtils.isNullOrEmpty(partitionIds)) { throw Exceptions .propagate(new IllegalStateException("There are no partitions in Event Hub " + this.eventHubName)); } int numberOfPartitions = partitionIds.size(); logger.info("Partition manager returned {} ownership records", partitionOwnershipMap.size()); logger.info("EventHubAsyncClient returned {} partitions", numberOfPartitions); if (!isValid(partitionOwnershipMap)) { throw Exceptions .propagate(new IllegalStateException("Invalid partitionOwnership data from PartitionManager")); } /* * Remove all partitions' ownership that have not been modified for a configuration period of time. This * means * that the previous EventProcessor that owned the partition is probably down and the partition is now eligible * to be claimed by other EventProcessors. */ Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships( partitionOwnershipMap); logger.info("Number of active ownership records {}", activePartitionOwnershipMap.size()); if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) { /* * If the active partition ownership map is empty, this is the first time an event processor is * running or all Event Processors are down for this Event Hub, consumer group combination. All * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership. 
*/ claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(numberOfPartitions))); return; } /* * Create a map of owner id and a list of partitions it owns */ Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values() .stream() .collect( Collectors.groupingBy(PartitionOwnership::ownerId, mapping(Function.identity(), toList()))); ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>()); /* * Find the minimum number of partitions every event processor should own when the load is * evenly distributed. */ int numberOfActiveEventProcessors = ownerPartitionMap.size(); logger.info("Number of active event processors {}", ownerPartitionMap.size()); int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors; /* * If the number of partitions in Event Hub is not evenly divisible by number of active event processors, * a few Event Processors may own 1 additional partition than the minimum when the load is balanced. Calculate * the number of event processors that can own additional partition. 
*/ int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors; logger.info("Expected min partitions per event processor = {}, expected number of event " + "processors with additional partition = {}", minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition); if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition, ownerPartitionMap)) { logger.info("Load is balanced"); return; } if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) { logger.info("This event processor owns {} partitions and shouldn't own more", ownerPartitionMap.get(ownerId).size()); return; } logger.info( "Load is unbalanced and this event processor should own more partitions"); /* * If some partitions are unclaimed, this could be because an event processor is down and * it's partitions are now available for others to own or because event processors are just * starting up and gradually claiming partitions to own or new partitions were added to Event Hub. * Find any partition that is not actively owned and claim it. * * OR * * Find a partition to steal from another event processor. Pick the event processor that has owns the highest * number of partitions. */ String partitionToClaim = partitionIds.parallelStream() .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId)) .findAny() .orElseGet(() -> { logger.info("No unclaimed partitions, stealing from another event processor"); return findPartitionToSteal(ownerPartitionMap); }); claimOwnership(partitionOwnershipMap, partitionToClaim); }); }
class PartitionBasedLoadBalancer { private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class); private final String eventHubName; private final String consumerGroupName; private final PartitionManager partitionManager; private final EventHubAsyncClient eventHubAsyncClient; private final String ownerId; private final long inactiveTimeLimitInSeconds; private final PartitionPumpManager partitionPumpManager; /** * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group. * * @param partitionManager The partition manager that this load balancer will use to read/update ownership details. * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events. * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with. * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with. * @param ownerId The owner identifier for the {@link EventProcessor} this load balancer is associated with. * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before * assuming the owner of the partition is inactive. * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions * that this {@link EventProcessor} is processing. 
*/ public PartitionBasedLoadBalancer(final PartitionManager partitionManager, final EventHubAsyncClient eventHubAsyncClient, final String eventHubName, final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager) { this.partitionManager = partitionManager; this.eventHubAsyncClient = eventHubAsyncClient; this.eventHubName = eventHubName; this.consumerGroupName = consumerGroupName; this.ownerId = ownerId; this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds; this.partitionPumpManager = partitionPumpManager; } /** * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning <b>at * most one</b> new partition. * <p> * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active * EventProcessor.Given that each invocation to this method results in ownership claim of at most one partition, * this algorithm converges gradually towards a steady state. * </p> * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an * {@link EventHubAsyncConsumer} for processing events from that partition. */ public void loadBalance() { /* * Retrieve current partition ownership details from the datastore. */ final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager .listOwnership(eventHubName, consumerGroupName) .timeout(Duration.ofSeconds(1)) .collectMap(PartitionOwnership::partitionId, Function.identity()); /* * Retrieve the list of partition ids from the Event Hub. 
*/ final Mono<List<String>> partitionsMono = eventHubAsyncClient .getPartitionIds() .timeout(Duration.ofSeconds(1)) .collectList(); Mono.zip(partitionOwnershipMono, partitionsMono) .map(this::loadBalance) .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage())) .subscribe(); } /* * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the * current Event Processor should take on the responsibility of processing more partitions. */ /* * Check if partition ownership data is valid before proceeding with load balancing. */ private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap.values() .stream() .noneMatch(partitionOwnership -> { return partitionOwnership.ownerId() == null || partitionOwnership.eventHubName() == null || !partitionOwnership.eventHubName().equals(this.eventHubName) || partitionOwnership.consumerGroupName() == null || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName) || partitionOwnership.partitionId() == null || partitionOwnership.lastModifiedTime() == null || partitionOwnership.eTag() == null; }); } /* * Find the event processor that owns the maximum number of partitions and steal a random partition * from it. */ private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) { List<PartitionOwnership> maxList = ownerPartitionMap.values() .stream() .max(Comparator.comparingInt(List::size)) .get(); return maxList.get(RANDOM.nextInt(maxList.size())).partitionId(); } /* * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor} * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional * partition. 
*/ private boolean isLoadBalanced(final int minPartitionsPerEventProcessor, final int numberOfEventProcessorsWithAdditionalPartition, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { if (ownerPartitionMap.values() .stream() .noneMatch(ownershipList -> { return ownershipList.size() < minPartitionsPerEventProcessor || ownershipList.size() > minPartitionsPerEventProcessor + 1; })) { long count = ownerPartitionMap.values() .stream() .filter(ownershipList -> ownershipList.size() == minPartitionsPerEventProcessor + 1) .count(); return count == numberOfEventProcessorsWithAdditionalPartition; } return false; } /* * This method is called after determining that the load is not balanced. This method will evaluate * if the current event processor should own more partitions. Specifically, this method returns true if the * current event processor owns less than the minimum number of partitions or if it owns the minimum number * and no other event processor owns lesser number of partitions than this event processor. */ private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size(); int leastPartitionsOwnedByAnyEventProcessor = ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size(); if (numberOfPartitionsOwned < minPartitionsPerEventProcessor || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor) { return true; } return false; } /* * This method will create a new map of partition id and PartitionOwnership containing only those partitions * that are actively owned. All entries in the original map returned by PartitionManager that haven't been * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by * dead event processors. These will not be included in the map returned by this method. 
*/ private Map<String, PartitionOwnership> removeInactivePartitionOwnerships( final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap .entrySet() .stream() .filter(entry -> { return System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS .toMillis(inactiveTimeLimitInSeconds); }).collect(Collectors.toMap(Entry::getKey, Entry::getValue)); } private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap, partitionIdToClaim); partitionManager .claimOwnership(ownershipRequest) .timeout(Duration.ofSeconds(1)) .doOnError(ex -> logger .warning("Failed to claim ownership of partition {} - {}", ownershipRequest.partitionId(), ex.getMessage(), ex)) .subscribe(partitionPumpManager::startPartitionPump); } private PartitionOwnership createPartitionOwnershipRequest( final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim); PartitionOwnership partitionOwnershipRequest = new PartitionOwnership() .ownerId(this.ownerId) .partitionId(partitionIdToClaim) .consumerGroupName(this.consumerGroupName) .eventHubName(this.eventHubName) .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber()) .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset()) .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag()) .ownerLevel(0L); return partitionOwnershipRequest; } }
class PartitionBasedLoadBalancer { private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class); private final String eventHubName; private final String consumerGroupName; private final PartitionManager partitionManager; private final EventHubAsyncClient eventHubAsyncClient; private final String ownerId; private final long inactiveTimeLimitInSeconds; private final PartitionPumpManager partitionPumpManager; /** * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group. * * @param partitionManager The partition manager that this load balancer will use to read/update ownership details. * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events. * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with. * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with. * @param ownerId The identifier of the {@link EventProcessor} that owns this load balancer. * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before * assuming the owner of the partition is inactive. * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions * that this {@link EventProcessor} is processing. 
*/ public PartitionBasedLoadBalancer(final PartitionManager partitionManager, final EventHubAsyncClient eventHubAsyncClient, final String eventHubName, final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager) { this.partitionManager = partitionManager; this.eventHubAsyncClient = eventHubAsyncClient; this.eventHubName = eventHubName; this.consumerGroupName = consumerGroupName; this.ownerId = ownerId; this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds; this.partitionPumpManager = partitionPumpManager; } /** * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning <b>at * most one</b> new partition. * <p> * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active * EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition, * this algorithm converges gradually towards a steady state. * </p> * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an * {@link EventHubAsyncConsumer} for processing events from that partition. */ public void loadBalance() { /* * Retrieve current partition ownership details from the datastore. */ final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager .listOwnership(eventHubName, consumerGroupName) .timeout(Duration.ofSeconds(1)) .collectMap(PartitionOwnership::partitionId, Function.identity()); /* * Retrieve the list of partition ids from the Event Hub. 
*/ final Mono<List<String>> partitionsMono = eventHubAsyncClient .getPartitionIds() .timeout(Duration.ofSeconds(1)) .collectList(); Mono.zip(partitionOwnershipMono, partitionsMono) .flatMap(this::loadBalance) .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage())) .subscribe(); } /* * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the * current Event Processor should take on the responsibility of processing more partitions. */ /* * Check if partition ownership data is valid before proceeding with load balancing. */ private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap.values() .stream() .noneMatch(partitionOwnership -> { return partitionOwnership.eventHubName() == null || !partitionOwnership.eventHubName().equals(this.eventHubName) || partitionOwnership.consumerGroupName() == null || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName) || partitionOwnership.partitionId() == null || partitionOwnership.lastModifiedTime() == null || partitionOwnership.eTag() == null; }); } /* * Find the event processor that owns the maximum number of partitions and steal a random partition * from it. 
*/ private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) { Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet() .stream() .max(Comparator.comparingInt(entry -> entry.getValue().size())) .get(); int numberOfPartitions = ownerWithMaxPartitions.getValue().size(); logger.info("Owner id {} owns {} partitions, stealing a partition from it", ownerWithMaxPartitions.getKey(), numberOfPartitions); return ownerWithMaxPartitions.getValue().get(RANDOM.nextInt(numberOfPartitions)).partitionId(); } /* * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor} * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional * partition. */ private boolean isLoadBalanced(final int minPartitionsPerEventProcessor, final int numberOfEventProcessorsWithAdditionalPartition, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int count = 0; for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) { int numberOfPartitions = partitionOwnership.size(); if (numberOfPartitions < minPartitionsPerEventProcessor || numberOfPartitions > minPartitionsPerEventProcessor + 1) { return false; } if (numberOfPartitions == minPartitionsPerEventProcessor + 1) { count++; } } return count == numberOfEventProcessorsWithAdditionalPartition; } /* * This method is called after determining that the load is not balanced. This method will evaluate * if the current event processor should own more partitions. Specifically, this method returns true if the * current event processor owns less than the minimum number of partitions or if it owns the minimum number * and no other event processor owns lesser number of partitions than this event processor. 
*/ private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size(); int leastPartitionsOwnedByAnyEventProcessor = ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size(); return numberOfPartitionsOwned < minPartitionsPerEventProcessor || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor; } /* * This method will create a new map of partition id and PartitionOwnership containing only those partitions * that are actively owned. All entries in the original map returned by PartitionManager that haven't been * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by * dead event processors. These will not be included in the map returned by this method. */ private Map<String, PartitionOwnership> removeInactivePartitionOwnerships( final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap .entrySet() .stream() .filter(entry -> { return (System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS .toMillis(inactiveTimeLimitInSeconds)) && !ImplUtils.isNullOrEmpty(entry.getValue().ownerId()); }).collect(Collectors.toMap(Entry::getKey, Entry::getValue)); } private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { logger.info("Attempting to claim ownership of partition {}", partitionIdToClaim); PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap, partitionIdToClaim); partitionManager .claimOwnership(ownershipRequest) .timeout(Duration.ofSeconds(1)) .doOnNext(partitionOwnership -> logger.info("Successfully claimed ownership of partition {}", partitionOwnership.partitionId())) .doOnError(ex -> logger .warning("Failed to claim ownership of partition {} - {}", 
ownershipRequest.partitionId(), ex.getMessage(), ex)) .subscribe(partitionPumpManager::startPartitionPump); } private PartitionOwnership createPartitionOwnershipRequest( final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim); PartitionOwnership partitionOwnershipRequest = new PartitionOwnership() .ownerId(this.ownerId) .partitionId(partitionIdToClaim) .consumerGroupName(this.consumerGroupName) .eventHubName(this.eventHubName) .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber()) .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset()) .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag()) .ownerLevel(0L); return partitionOwnershipRequest; } }
"A long time" is ambiguous. You may wish to consider offering context around how that is determined. "Remove ownership that have not been modified within the configured period" or similar? _(also, "ownership" is both singular and plural)_
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) { Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1(); List<String> partitionIds = tuple.getT2(); if (ImplUtils.isNullOrEmpty(partitionIds)) { return Mono.error(new IllegalStateException("There are no partitions in Event Hub " + this.eventHubName)); } if (!isValid(partitionOwnershipMap)) { return Mono.error(new IllegalStateException("Invalid partitionOwnership data from PartitionManager")); } /* * Remove all partitions ownerships that have not be modified for a long time. This means that the previous * event processor that owned the partition is probably down and the partition is now eligible to be * claimed by other event processors. */ Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships( partitionOwnershipMap); if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) { /* * If the active partition ownership map is empty, this is the first time an event processor is * running or all Event Processors are down for this Event Hub, consumer group combination. All * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership. */ claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(partitionIds.size()))); return Mono.empty(); } /* * Create a map of owner id and a list of partitions it owns */ Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values() .stream() .collect( Collectors.groupingBy(PartitionOwnership::ownerId, mapping(Function.identity(), toList()))); ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>()); /* * Find the minimum number of partitions every event processor should own when the load is * evenly distributed. 
*/ int minPartitionsPerEventProcessor = partitionIds.size() / ownerPartitionMap.size(); /* * If the number of partitions in Event Hub is not evenly divisible by number of active event processors, * a few Event Processors may own 1 additional partition than the minimum when the load is balanced. Calculate * the number of event processors that can own additional partition. */ int numberOfEventProcessorsWithAdditionalPartition = partitionIds.size() % ownerPartitionMap.size(); if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition, ownerPartitionMap)) { return Mono.empty(); } if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) { return Mono.empty(); } /* * If some partitions are unclaimed, this could be because an event processor is down and * it's partitions are now available for others to own or because event processors are just * starting up and gradually claiming partitions to own or new partitions were added to Event Hub. * Find any partition that is not actively owned and claim it. * * OR * * Find a partition to steal from another event processor. Pick the event processor that has owns the highest * number of partitions. */ String partitionToClaim = partitionIds.parallelStream() .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId)) .findAny() .orElseGet(() -> findPartitionToSteal(ownerPartitionMap)); claimOwnership(partitionOwnershipMap, partitionToClaim); return Mono.empty(); }
* Remove all partitions ownerships that have not be modified for a long time. This means that the previous
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) { return Mono.fromRunnable(() -> { logger.info("Starting load balancer"); Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1(); List<String> partitionIds = tuple.getT2(); if (ImplUtils.isNullOrEmpty(partitionIds)) { throw Exceptions .propagate(new IllegalStateException("There are no partitions in Event Hub " + this.eventHubName)); } int numberOfPartitions = partitionIds.size(); logger.info("Partition manager returned {} ownership records", partitionOwnershipMap.size()); logger.info("EventHubAsyncClient returned {} partitions", numberOfPartitions); if (!isValid(partitionOwnershipMap)) { throw Exceptions .propagate(new IllegalStateException("Invalid partitionOwnership data from PartitionManager")); } /* * Remove all partitions' ownership that have not been modified for a configuration period of time. This * means * that the previous EventProcessor that owned the partition is probably down and the partition is now eligible * to be claimed by other EventProcessors. */ Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships( partitionOwnershipMap); logger.info("Number of active ownership records {}", activePartitionOwnershipMap.size()); if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) { /* * If the active partition ownership map is empty, this is the first time an event processor is * running or all Event Processors are down for this Event Hub, consumer group combination. All * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership. 
*/ claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(numberOfPartitions))); return; } /* * Create a map of owner id and a list of partitions it owns */ Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values() .stream() .collect( Collectors.groupingBy(PartitionOwnership::ownerId, mapping(Function.identity(), toList()))); ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>()); /* * Find the minimum number of partitions every event processor should own when the load is * evenly distributed. */ int numberOfActiveEventProcessors = ownerPartitionMap.size(); logger.info("Number of active event processors {}", ownerPartitionMap.size()); int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors; /* * If the number of partitions in Event Hub is not evenly divisible by number of active event processors, * a few Event Processors may own 1 additional partition than the minimum when the load is balanced. Calculate * the number of event processors that can own additional partition. 
*/ int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors; logger.info("Expected min partitions per event processor = {}, expected number of event " + "processors with additional partition = {}", minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition); if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition, ownerPartitionMap)) { logger.info("Load is balanced"); return; } if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) { logger.info("This event processor owns {} partitions and shouldn't own more", ownerPartitionMap.get(ownerId).size()); return; } logger.info( "Load is unbalanced and this event processor should own more partitions"); /* * If some partitions are unclaimed, this could be because an event processor is down and * it's partitions are now available for others to own or because event processors are just * starting up and gradually claiming partitions to own or new partitions were added to Event Hub. * Find any partition that is not actively owned and claim it. * * OR * * Find a partition to steal from another event processor. Pick the event processor that has owns the highest * number of partitions. */ String partitionToClaim = partitionIds.parallelStream() .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId)) .findAny() .orElseGet(() -> { logger.info("No unclaimed partitions, stealing from another event processor"); return findPartitionToSteal(ownerPartitionMap); }); claimOwnership(partitionOwnershipMap, partitionToClaim); }); }
class PartitionBasedLoadBalancer { private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class); private final String eventHubName; private final String consumerGroupName; private final PartitionManager partitionManager; private final EventHubAsyncClient eventHubAsyncClient; private final String ownerId; private final long inactiveTimeLimitInSeconds; private final PartitionPumpManager partitionPumpManager; /** * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group. * * @param partitionManager The partition manager that this load balancer will use to read/update ownership details. * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events. * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with. * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with. * @param ownerId The owner identifier for the {@link EventProcessor} this load balancer is associated with. * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before * assuming the owner of the partition is inactive. * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions * that this {@link EventProcessor} is processing. 
*/ public PartitionBasedLoadBalancer(final PartitionManager partitionManager, final EventHubAsyncClient eventHubAsyncClient, final String eventHubName, final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager) { this.partitionManager = partitionManager; this.eventHubAsyncClient = eventHubAsyncClient; this.eventHubName = eventHubName; this.consumerGroupName = consumerGroupName; this.ownerId = ownerId; this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds; this.partitionPumpManager = partitionPumpManager; } /** * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning <b>at * most one</b> new partition. * <p> * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active * EventProcessor.Given that each invocation to this method results in ownership claim of at most one partition, * this algorithm converges gradually towards a steady state. * </p> * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an * {@link EventHubAsyncConsumer} for processing events from that partition. */ public void loadBalance() { /* * Retrieve current partition ownership details from the datastore. */ final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager .listOwnership(eventHubName, consumerGroupName) .timeout(Duration.ofSeconds(1)) .collectMap(PartitionOwnership::partitionId, Function.identity()); /* * Retrieve the list of partition ids from the Event Hub. 
*/ final Mono<List<String>> partitionsMono = eventHubAsyncClient .getPartitionIds() .timeout(Duration.ofSeconds(1)) .collectList(); Mono.zip(partitionOwnershipMono, partitionsMono) .map(this::loadBalance) .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage())) .subscribe(); } /* * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the * current Event Processor should take on the responsibility of processing more partitions. */ /* * Check if partition ownership data is valid before proceeding with load balancing. */ private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap.values() .stream() .noneMatch(partitionOwnership -> { return partitionOwnership.ownerId() == null || partitionOwnership.eventHubName() == null || !partitionOwnership.eventHubName().equals(this.eventHubName) || partitionOwnership.consumerGroupName() == null || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName) || partitionOwnership.partitionId() == null || partitionOwnership.lastModifiedTime() == null || partitionOwnership.eTag() == null; }); } /* * Find the event processor that owns the maximum number of partitions and steal a random partition * from it. */ private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) { List<PartitionOwnership> maxList = ownerPartitionMap.values() .stream() .max(Comparator.comparingInt(List::size)) .get(); return maxList.get(RANDOM.nextInt(maxList.size())).partitionId(); } /* * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor} * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional * partition. 
*/ private boolean isLoadBalanced(final int minPartitionsPerEventProcessor, final int numberOfEventProcessorsWithAdditionalPartition, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { if (ownerPartitionMap.values() .stream() .noneMatch(ownershipList -> { return ownershipList.size() < minPartitionsPerEventProcessor || ownershipList.size() > minPartitionsPerEventProcessor + 1; })) { long count = ownerPartitionMap.values() .stream() .filter(ownershipList -> ownershipList.size() == minPartitionsPerEventProcessor + 1) .count(); return count == numberOfEventProcessorsWithAdditionalPartition; } return false; } /* * This method is called after determining that the load is not balanced. This method will evaluate * if the current event processor should own more partitions. Specifically, this method returns true if the * current event processor owns less than the minimum number of partitions or if it owns the minimum number * and no other event processor owns lesser number of partitions than this event processor. */ private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size(); int leastPartitionsOwnedByAnyEventProcessor = ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size(); if (numberOfPartitionsOwned < minPartitionsPerEventProcessor || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor) { return true; } return false; } /* * This method will create a new map of partition id and PartitionOwnership containing only those partitions * that are actively owned. All entries in the original map returned by PartitionManager that haven't been * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by * dead event processors. These will not be included in the map returned by this method. 
*/ private Map<String, PartitionOwnership> removeInactivePartitionOwnerships( final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap .entrySet() .stream() .filter(entry -> { return System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS .toMillis(inactiveTimeLimitInSeconds); }).collect(Collectors.toMap(Entry::getKey, Entry::getValue)); } private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap, partitionIdToClaim); partitionManager .claimOwnership(ownershipRequest) .timeout(Duration.ofSeconds(1)) .doOnError(ex -> logger .warning("Failed to claim ownership of partition {} - {}", ownershipRequest.partitionId(), ex.getMessage(), ex)) .subscribe(partitionPumpManager::startPartitionPump); } private PartitionOwnership createPartitionOwnershipRequest( final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim); PartitionOwnership partitionOwnershipRequest = new PartitionOwnership() .ownerId(this.ownerId) .partitionId(partitionIdToClaim) .consumerGroupName(this.consumerGroupName) .eventHubName(this.eventHubName) .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber()) .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset()) .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag()) .ownerLevel(0L); return partitionOwnershipRequest; } }
class PartitionBasedLoadBalancer { private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class); private final String eventHubName; private final String consumerGroupName; private final PartitionManager partitionManager; private final EventHubAsyncClient eventHubAsyncClient; private final String ownerId; private final long inactiveTimeLimitInSeconds; private final PartitionPumpManager partitionPumpManager; /** * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group. * * @param partitionManager The partition manager that this load balancer will use to read/update ownership details. * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events. * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with. * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with. * @param ownerId The identifier of the {@link EventProcessor} that owns this load balancer. * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before * assuming the owner of the partition is inactive. * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions * that this {@link EventProcessor} is processing. 
*/ public PartitionBasedLoadBalancer(final PartitionManager partitionManager, final EventHubAsyncClient eventHubAsyncClient, final String eventHubName, final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager) { this.partitionManager = partitionManager; this.eventHubAsyncClient = eventHubAsyncClient; this.eventHubName = eventHubName; this.consumerGroupName = consumerGroupName; this.ownerId = ownerId; this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds; this.partitionPumpManager = partitionPumpManager; } /** * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning <b>at * most one</b> new partition. * <p> * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active * EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition, * this algorithm converges gradually towards a steady state. * </p> * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an * {@link EventHubAsyncConsumer} for processing events from that partition. */ public void loadBalance() { /* * Retrieve current partition ownership details from the datastore. */ final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager .listOwnership(eventHubName, consumerGroupName) .timeout(Duration.ofSeconds(1)) .collectMap(PartitionOwnership::partitionId, Function.identity()); /* * Retrieve the list of partition ids from the Event Hub. 
*/ final Mono<List<String>> partitionsMono = eventHubAsyncClient .getPartitionIds() .timeout(Duration.ofSeconds(1)) .collectList(); Mono.zip(partitionOwnershipMono, partitionsMono) .flatMap(this::loadBalance) .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage())) .subscribe(); } /* * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the * current Event Processor should take on the responsibility of processing more partitions. */ /* * Check if partition ownership data is valid before proceeding with load balancing. */ private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap.values() .stream() .noneMatch(partitionOwnership -> { return partitionOwnership.eventHubName() == null || !partitionOwnership.eventHubName().equals(this.eventHubName) || partitionOwnership.consumerGroupName() == null || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName) || partitionOwnership.partitionId() == null || partitionOwnership.lastModifiedTime() == null || partitionOwnership.eTag() == null; }); } /* * Find the event processor that owns the maximum number of partitions and steal a random partition * from it. 
*/ private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) { Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet() .stream() .max(Comparator.comparingInt(entry -> entry.getValue().size())) .get(); int numberOfPartitions = ownerWithMaxPartitions.getValue().size(); logger.info("Owner id {} owns {} partitions, stealing a partition from it", ownerWithMaxPartitions.getKey(), numberOfPartitions); return ownerWithMaxPartitions.getValue().get(RANDOM.nextInt(numberOfPartitions)).partitionId(); } /* * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor} * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional * partition. */ private boolean isLoadBalanced(final int minPartitionsPerEventProcessor, final int numberOfEventProcessorsWithAdditionalPartition, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int count = 0; for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) { int numberOfPartitions = partitionOwnership.size(); if (numberOfPartitions < minPartitionsPerEventProcessor || numberOfPartitions > minPartitionsPerEventProcessor + 1) { return false; } if (numberOfPartitions == minPartitionsPerEventProcessor + 1) { count++; } } return count == numberOfEventProcessorsWithAdditionalPartition; } /* * This method is called after determining that the load is not balanced. This method will evaluate * if the current event processor should own more partitions. Specifically, this method returns true if the * current event processor owns less than the minimum number of partitions or if it owns the minimum number * and no other event processor owns lesser number of partitions than this event processor. 
*/ private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size(); int leastPartitionsOwnedByAnyEventProcessor = ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size(); return numberOfPartitionsOwned < minPartitionsPerEventProcessor || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor; } /* * This method will create a new map of partition id and PartitionOwnership containing only those partitions * that are actively owned. All entries in the original map returned by PartitionManager that haven't been * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by * dead event processors. These will not be included in the map returned by this method. */ private Map<String, PartitionOwnership> removeInactivePartitionOwnerships( final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap .entrySet() .stream() .filter(entry -> { return (System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS .toMillis(inactiveTimeLimitInSeconds)) && !ImplUtils.isNullOrEmpty(entry.getValue().ownerId()); }).collect(Collectors.toMap(Entry::getKey, Entry::getValue)); } private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { logger.info("Attempting to claim ownership of partition {}", partitionIdToClaim); PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap, partitionIdToClaim); partitionManager .claimOwnership(ownershipRequest) .timeout(Duration.ofSeconds(1)) .doOnNext(partitionOwnership -> logger.info("Successfully claimed ownership of partition {}", partitionOwnership.partitionId())) .doOnError(ex -> logger .warning("Failed to claim ownership of partition {} - {}", 
ownershipRequest.partitionId(), ex.getMessage(), ex)) .subscribe(partitionPumpManager::startPartitionPump); } private PartitionOwnership createPartitionOwnershipRequest( final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim); PartitionOwnership partitionOwnershipRequest = new PartitionOwnership() .ownerId(this.ownerId) .partitionId(partitionIdToClaim) .consumerGroupName(this.consumerGroupName) .eventHubName(this.eventHubName) .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber()) .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset()) .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag()) .ownerLevel(0L); return partitionOwnershipRequest; } }
nit: This may be a lack of understanding of language idioms, but you're making several calls to the `size()` methods in this logic. Maybe it is worth considering reading them once and caching in a local variable for efficiency?
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) { Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1(); List<String> partitionIds = tuple.getT2(); if (ImplUtils.isNullOrEmpty(partitionIds)) { return Mono.error(new IllegalStateException("There are no partitions in Event Hub " + this.eventHubName)); } if (!isValid(partitionOwnershipMap)) { return Mono.error(new IllegalStateException("Invalid partitionOwnership data from PartitionManager")); } /* * Remove all partitions ownerships that have not be modified for a long time. This means that the previous * event processor that owned the partition is probably down and the partition is now eligible to be * claimed by other event processors. */ Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships( partitionOwnershipMap); if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) { /* * If the active partition ownership map is empty, this is the first time an event processor is * running or all Event Processors are down for this Event Hub, consumer group combination. All * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership. */ claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(partitionIds.size()))); return Mono.empty(); } /* * Create a map of owner id and a list of partitions it owns */ Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values() .stream() .collect( Collectors.groupingBy(PartitionOwnership::ownerId, mapping(Function.identity(), toList()))); ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>()); /* * Find the minimum number of partitions every event processor should own when the load is * evenly distributed. 
*/ int minPartitionsPerEventProcessor = partitionIds.size() / ownerPartitionMap.size(); /* * If the number of partitions in Event Hub is not evenly divisible by number of active event processors, * a few Event Processors may own 1 additional partition than the minimum when the load is balanced. Calculate * the number of event processors that can own additional partition. */ int numberOfEventProcessorsWithAdditionalPartition = partitionIds.size() % ownerPartitionMap.size(); if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition, ownerPartitionMap)) { return Mono.empty(); } if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) { return Mono.empty(); } /* * If some partitions are unclaimed, this could be because an event processor is down and * it's partitions are now available for others to own or because event processors are just * starting up and gradually claiming partitions to own or new partitions were added to Event Hub. * Find any partition that is not actively owned and claim it. * * OR * * Find a partition to steal from another event processor. Pick the event processor that has owns the highest * number of partitions. */ String partitionToClaim = partitionIds.parallelStream() .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId)) .findAny() .orElseGet(() -> findPartitionToSteal(ownerPartitionMap)); claimOwnership(partitionOwnershipMap, partitionToClaim); return Mono.empty(); }
int minPartitionsPerEventProcessor = partitionIds.size() / ownerPartitionMap.size();
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) { return Mono.fromRunnable(() -> { logger.info("Starting load balancer"); Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1(); List<String> partitionIds = tuple.getT2(); if (ImplUtils.isNullOrEmpty(partitionIds)) { throw Exceptions .propagate(new IllegalStateException("There are no partitions in Event Hub " + this.eventHubName)); } int numberOfPartitions = partitionIds.size(); logger.info("Partition manager returned {} ownership records", partitionOwnershipMap.size()); logger.info("EventHubAsyncClient returned {} partitions", numberOfPartitions); if (!isValid(partitionOwnershipMap)) { throw Exceptions .propagate(new IllegalStateException("Invalid partitionOwnership data from PartitionManager")); } /* * Remove all partitions' ownership that have not been modified for a configuration period of time. This * means * that the previous EventProcessor that owned the partition is probably down and the partition is now eligible * to be claimed by other EventProcessors. */ Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships( partitionOwnershipMap); logger.info("Number of active ownership records {}", activePartitionOwnershipMap.size()); if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) { /* * If the active partition ownership map is empty, this is the first time an event processor is * running or all Event Processors are down for this Event Hub, consumer group combination. All * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership. 
*/ claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(numberOfPartitions))); return; } /* * Create a map of owner id and a list of partitions it owns */ Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values() .stream() .collect( Collectors.groupingBy(PartitionOwnership::ownerId, mapping(Function.identity(), toList()))); ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>()); /* * Find the minimum number of partitions every event processor should own when the load is * evenly distributed. */ int numberOfActiveEventProcessors = ownerPartitionMap.size(); logger.info("Number of active event processors {}", ownerPartitionMap.size()); int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors; /* * If the number of partitions in Event Hub is not evenly divisible by number of active event processors, * a few Event Processors may own 1 additional partition than the minimum when the load is balanced. Calculate * the number of event processors that can own additional partition. 
*/ int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors; logger.info("Expected min partitions per event processor = {}, expected number of event " + "processors with additional partition = {}", minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition); if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition, ownerPartitionMap)) { logger.info("Load is balanced"); return; } if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) { logger.info("This event processor owns {} partitions and shouldn't own more", ownerPartitionMap.get(ownerId).size()); return; } logger.info( "Load is unbalanced and this event processor should own more partitions"); /* * If some partitions are unclaimed, this could be because an event processor is down and * it's partitions are now available for others to own or because event processors are just * starting up and gradually claiming partitions to own or new partitions were added to Event Hub. * Find any partition that is not actively owned and claim it. * * OR * * Find a partition to steal from another event processor. Pick the event processor that has owns the highest * number of partitions. */ String partitionToClaim = partitionIds.parallelStream() .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId)) .findAny() .orElseGet(() -> { logger.info("No unclaimed partitions, stealing from another event processor"); return findPartitionToSteal(ownerPartitionMap); }); claimOwnership(partitionOwnershipMap, partitionToClaim); }); }
class PartitionBasedLoadBalancer { private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class); private final String eventHubName; private final String consumerGroupName; private final PartitionManager partitionManager; private final EventHubAsyncClient eventHubAsyncClient; private final String ownerId; private final long inactiveTimeLimitInSeconds; private final PartitionPumpManager partitionPumpManager; /** * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group. * * @param partitionManager The partition manager that this load balancer will use to read/update ownership details. * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events. * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with. * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with. * @param ownerId The owner identifier for the {@link EventProcessor} this load balancer is associated with. * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before * assuming the owner of the partition is inactive. * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions * that this {@link EventProcessor} is processing. 
*/ public PartitionBasedLoadBalancer(final PartitionManager partitionManager, final EventHubAsyncClient eventHubAsyncClient, final String eventHubName, final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager) { this.partitionManager = partitionManager; this.eventHubAsyncClient = eventHubAsyncClient; this.eventHubName = eventHubName; this.consumerGroupName = consumerGroupName; this.ownerId = ownerId; this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds; this.partitionPumpManager = partitionPumpManager; } /** * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning <b>at * most one</b> new partition. * <p> * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active * EventProcessor.Given that each invocation to this method results in ownership claim of at most one partition, * this algorithm converges gradually towards a steady state. * </p> * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an * {@link EventHubAsyncConsumer} for processing events from that partition. */ public void loadBalance() { /* * Retrieve current partition ownership details from the datastore. */ final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager .listOwnership(eventHubName, consumerGroupName) .timeout(Duration.ofSeconds(1)) .collectMap(PartitionOwnership::partitionId, Function.identity()); /* * Retrieve the list of partition ids from the Event Hub. 
*/ final Mono<List<String>> partitionsMono = eventHubAsyncClient .getPartitionIds() .timeout(Duration.ofSeconds(1)) .collectList(); Mono.zip(partitionOwnershipMono, partitionsMono) .map(this::loadBalance) .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage())) .subscribe(); } /* * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the * current Event Processor should take on the responsibility of processing more partitions. */ /* * Check if partition ownership data is valid before proceeding with load balancing. */ private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap.values() .stream() .noneMatch(partitionOwnership -> { return partitionOwnership.ownerId() == null || partitionOwnership.eventHubName() == null || !partitionOwnership.eventHubName().equals(this.eventHubName) || partitionOwnership.consumerGroupName() == null || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName) || partitionOwnership.partitionId() == null || partitionOwnership.lastModifiedTime() == null || partitionOwnership.eTag() == null; }); } /* * Find the event processor that owns the maximum number of partitions and steal a random partition * from it. */ private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) { List<PartitionOwnership> maxList = ownerPartitionMap.values() .stream() .max(Comparator.comparingInt(List::size)) .get(); return maxList.get(RANDOM.nextInt(maxList.size())).partitionId(); } /* * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor} * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional * partition. 
*/ private boolean isLoadBalanced(final int minPartitionsPerEventProcessor, final int numberOfEventProcessorsWithAdditionalPartition, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { if (ownerPartitionMap.values() .stream() .noneMatch(ownershipList -> { return ownershipList.size() < minPartitionsPerEventProcessor || ownershipList.size() > minPartitionsPerEventProcessor + 1; })) { long count = ownerPartitionMap.values() .stream() .filter(ownershipList -> ownershipList.size() == minPartitionsPerEventProcessor + 1) .count(); return count == numberOfEventProcessorsWithAdditionalPartition; } return false; } /* * This method is called after determining that the load is not balanced. This method will evaluate * if the current event processor should own more partitions. Specifically, this method returns true if the * current event processor owns less than the minimum number of partitions or if it owns the minimum number * and no other event processor owns lesser number of partitions than this event processor. */ private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size(); int leastPartitionsOwnedByAnyEventProcessor = ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size(); if (numberOfPartitionsOwned < minPartitionsPerEventProcessor || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor) { return true; } return false; } /* * This method will create a new map of partition id and PartitionOwnership containing only those partitions * that are actively owned. All entries in the original map returned by PartitionManager that haven't been * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by * dead event processors. These will not be included in the map returned by this method. 
*/ private Map<String, PartitionOwnership> removeInactivePartitionOwnerships( final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap .entrySet() .stream() .filter(entry -> { return System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS .toMillis(inactiveTimeLimitInSeconds); }).collect(Collectors.toMap(Entry::getKey, Entry::getValue)); } private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap, partitionIdToClaim); partitionManager .claimOwnership(ownershipRequest) .timeout(Duration.ofSeconds(1)) .doOnError(ex -> logger .warning("Failed to claim ownership of partition {} - {}", ownershipRequest.partitionId(), ex.getMessage(), ex)) .subscribe(partitionPumpManager::startPartitionPump); } private PartitionOwnership createPartitionOwnershipRequest( final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim); PartitionOwnership partitionOwnershipRequest = new PartitionOwnership() .ownerId(this.ownerId) .partitionId(partitionIdToClaim) .consumerGroupName(this.consumerGroupName) .eventHubName(this.eventHubName) .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber()) .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset()) .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag()) .ownerLevel(0L); return partitionOwnershipRequest; } }
class PartitionBasedLoadBalancer { private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class); private final String eventHubName; private final String consumerGroupName; private final PartitionManager partitionManager; private final EventHubAsyncClient eventHubAsyncClient; private final String ownerId; private final long inactiveTimeLimitInSeconds; private final PartitionPumpManager partitionPumpManager; /** * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group. * * @param partitionManager The partition manager that this load balancer will use to read/update ownership details. * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events. * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with. * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with. * @param ownerId The identifier of the {@link EventProcessor} that owns this load balancer. * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before * assuming the owner of the partition is inactive. * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions * that this {@link EventProcessor} is processing. 
*/ public PartitionBasedLoadBalancer(final PartitionManager partitionManager, final EventHubAsyncClient eventHubAsyncClient, final String eventHubName, final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager) { this.partitionManager = partitionManager; this.eventHubAsyncClient = eventHubAsyncClient; this.eventHubName = eventHubName; this.consumerGroupName = consumerGroupName; this.ownerId = ownerId; this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds; this.partitionPumpManager = partitionPumpManager; } /** * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning <b>at * most one</b> new partition. * <p> * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active * EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition, * this algorithm converges gradually towards a steady state. * </p> * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an * {@link EventHubAsyncConsumer} for processing events from that partition. */ public void loadBalance() { /* * Retrieve current partition ownership details from the datastore. */ final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager .listOwnership(eventHubName, consumerGroupName) .timeout(Duration.ofSeconds(1)) .collectMap(PartitionOwnership::partitionId, Function.identity()); /* * Retrieve the list of partition ids from the Event Hub. 
*/ final Mono<List<String>> partitionsMono = eventHubAsyncClient .getPartitionIds() .timeout(Duration.ofSeconds(1)) .collectList(); Mono.zip(partitionOwnershipMono, partitionsMono) .flatMap(this::loadBalance) .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage())) .subscribe(); } /* * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the * current Event Processor should take on the responsibility of processing more partitions. */ /* * Check if partition ownership data is valid before proceeding with load balancing. */ private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap.values() .stream() .noneMatch(partitionOwnership -> { return partitionOwnership.eventHubName() == null || !partitionOwnership.eventHubName().equals(this.eventHubName) || partitionOwnership.consumerGroupName() == null || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName) || partitionOwnership.partitionId() == null || partitionOwnership.lastModifiedTime() == null || partitionOwnership.eTag() == null; }); } /* * Find the event processor that owns the maximum number of partitions and steal a random partition * from it. 
*/ private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) { Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet() .stream() .max(Comparator.comparingInt(entry -> entry.getValue().size())) .get(); int numberOfPartitions = ownerWithMaxPartitions.getValue().size(); logger.info("Owner id {} owns {} partitions, stealing a partition from it", ownerWithMaxPartitions.getKey(), numberOfPartitions); return ownerWithMaxPartitions.getValue().get(RANDOM.nextInt(numberOfPartitions)).partitionId(); } /* * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor} * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional * partition. */ private boolean isLoadBalanced(final int minPartitionsPerEventProcessor, final int numberOfEventProcessorsWithAdditionalPartition, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int count = 0; for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) { int numberOfPartitions = partitionOwnership.size(); if (numberOfPartitions < minPartitionsPerEventProcessor || numberOfPartitions > minPartitionsPerEventProcessor + 1) { return false; } if (numberOfPartitions == minPartitionsPerEventProcessor + 1) { count++; } } return count == numberOfEventProcessorsWithAdditionalPartition; } /* * This method is called after determining that the load is not balanced. This method will evaluate * if the current event processor should own more partitions. Specifically, this method returns true if the * current event processor owns less than the minimum number of partitions or if it owns the minimum number * and no other event processor owns lesser number of partitions than this event processor. 
*/ private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size(); int leastPartitionsOwnedByAnyEventProcessor = ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size(); return numberOfPartitionsOwned < minPartitionsPerEventProcessor || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor; } /* * This method will create a new map of partition id and PartitionOwnership containing only those partitions * that are actively owned. All entries in the original map returned by PartitionManager that haven't been * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by * dead event processors. These will not be included in the map returned by this method. */ private Map<String, PartitionOwnership> removeInactivePartitionOwnerships( final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap .entrySet() .stream() .filter(entry -> { return (System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS .toMillis(inactiveTimeLimitInSeconds)) && !ImplUtils.isNullOrEmpty(entry.getValue().ownerId()); }).collect(Collectors.toMap(Entry::getKey, Entry::getValue)); } private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { logger.info("Attempting to claim ownership of partition {}", partitionIdToClaim); PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap, partitionIdToClaim); partitionManager .claimOwnership(ownershipRequest) .timeout(Duration.ofSeconds(1)) .doOnNext(partitionOwnership -> logger.info("Successfully claimed ownership of partition {}", partitionOwnership.partitionId())) .doOnError(ex -> logger .warning("Failed to claim ownership of partition {} - {}", 
ownershipRequest.partitionId(), ex.getMessage(), ex)) .subscribe(partitionPumpManager::startPartitionPump); } private PartitionOwnership createPartitionOwnershipRequest( final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim); PartitionOwnership partitionOwnershipRequest = new PartitionOwnership() .ownerId(this.ownerId) .partitionId(partitionIdToClaim) .consumerGroupName(this.consumerGroupName) .eventHubName(this.eventHubName) .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber()) .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset()) .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag()) .ownerLevel(0L); return partitionOwnershipRequest; } }
Why is this returning `Mono<Void>`? The work itself is synchronous until we return Mono.empty() at the very end. One way to make this more async is to return a `Mono.fromRunnable(() -> { // work in here. });` So the work is run asynchronously and completes when it ends.
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) { Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1(); List<String> partitionIds = tuple.getT2(); if (ImplUtils.isNullOrEmpty(partitionIds)) { return Mono.error(new IllegalStateException("There are no partitions in Event Hub " + this.eventHubName)); } if (!isValid(partitionOwnershipMap)) { return Mono.error(new IllegalStateException("Invalid partitionOwnership data from PartitionManager")); } /* * Remove all partitions' ownership that have not be modified for a configuration period of time. This means * that the previous EventProcessor that owned the partition is probably down and the partition is now eligible * to be claimed by other EventProcessors. */ Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships( partitionOwnershipMap); int numberOfPartitions = partitionIds.size(); if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) { /* * If the active partition ownership map is empty, this is the first time an event processor is * running or all Event Processors are down for this Event Hub, consumer group combination. All * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership. */ claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(numberOfPartitions))); return Mono.empty(); } /* * Create a map of owner id and a list of partitions it owns */ Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values() .stream() .collect( Collectors.groupingBy(PartitionOwnership::ownerId, mapping(Function.identity(), toList()))); ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>()); /* * Find the minimum number of partitions every event processor should own when the load is * evenly distributed. 
*/ int numberOfActiveEventProcessors = ownerPartitionMap.size(); int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors; /* * If the number of partitions in Event Hub is not evenly divisible by number of active event processors, * a few Event Processors may own 1 additional partition than the minimum when the load is balanced. Calculate * the number of event processors that can own additional partition. */ int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors; if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition, ownerPartitionMap)) { return Mono.empty(); } if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) { return Mono.empty(); } /* * If some partitions are unclaimed, this could be because an event processor is down and * it's partitions are now available for others to own or because event processors are just * starting up and gradually claiming partitions to own or new partitions were added to Event Hub. * Find any partition that is not actively owned and claim it. * * OR * * Find a partition to steal from another event processor. Pick the event processor that has owns the highest * number of partitions. */ String partitionToClaim = partitionIds.parallelStream() .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId)) .findAny() .orElseGet(() -> findPartitionToSteal(ownerPartitionMap)); claimOwnership(partitionOwnershipMap, partitionToClaim); return Mono.empty(); }
Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) { return Mono.fromRunnable(() -> { logger.info("Starting load balancer"); Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1(); List<String> partitionIds = tuple.getT2(); if (ImplUtils.isNullOrEmpty(partitionIds)) { throw Exceptions .propagate(new IllegalStateException("There are no partitions in Event Hub " + this.eventHubName)); } int numberOfPartitions = partitionIds.size(); logger.info("Partition manager returned {} ownership records", partitionOwnershipMap.size()); logger.info("EventHubAsyncClient returned {} partitions", numberOfPartitions); if (!isValid(partitionOwnershipMap)) { throw Exceptions .propagate(new IllegalStateException("Invalid partitionOwnership data from PartitionManager")); } /* * Remove all partitions' ownership that have not been modified for a configuration period of time. This * means * that the previous EventProcessor that owned the partition is probably down and the partition is now eligible * to be claimed by other EventProcessors. */ Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships( partitionOwnershipMap); logger.info("Number of active ownership records {}", activePartitionOwnershipMap.size()); if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) { /* * If the active partition ownership map is empty, this is the first time an event processor is * running or all Event Processors are down for this Event Hub, consumer group combination. All * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership. 
*/ claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(numberOfPartitions))); return; } /* * Create a map of owner id and a list of partitions it owns */ Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values() .stream() .collect( Collectors.groupingBy(PartitionOwnership::ownerId, mapping(Function.identity(), toList()))); ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>()); /* * Find the minimum number of partitions every event processor should own when the load is * evenly distributed. */ int numberOfActiveEventProcessors = ownerPartitionMap.size(); logger.info("Number of active event processors {}", ownerPartitionMap.size()); int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors; /* * If the number of partitions in Event Hub is not evenly divisible by number of active event processors, * a few Event Processors may own 1 additional partition than the minimum when the load is balanced. Calculate * the number of event processors that can own additional partition. 
*/ int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors; logger.info("Expected min partitions per event processor = {}, expected number of event " + "processors with additional partition = {}", minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition); if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition, ownerPartitionMap)) { logger.info("Load is balanced"); return; } if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) { logger.info("This event processor owns {} partitions and shouldn't own more", ownerPartitionMap.get(ownerId).size()); return; } logger.info( "Load is unbalanced and this event processor should own more partitions"); /* * If some partitions are unclaimed, this could be because an event processor is down and * it's partitions are now available for others to own or because event processors are just * starting up and gradually claiming partitions to own or new partitions were added to Event Hub. * Find any partition that is not actively owned and claim it. * * OR * * Find a partition to steal from another event processor. Pick the event processor that has owns the highest * number of partitions. */ String partitionToClaim = partitionIds.parallelStream() .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId)) .findAny() .orElseGet(() -> { logger.info("No unclaimed partitions, stealing from another event processor"); return findPartitionToSteal(ownerPartitionMap); }); claimOwnership(partitionOwnershipMap, partitionToClaim); }); }
class PartitionBasedLoadBalancer { private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class); private final String eventHubName; private final String consumerGroupName; private final PartitionManager partitionManager; private final EventHubAsyncClient eventHubAsyncClient; private final String ownerId; private final long inactiveTimeLimitInSeconds; private final PartitionPumpManager partitionPumpManager; /** * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group. * * @param partitionManager The partition manager that this load balancer will use to read/update ownership details. * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events. * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with. * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with. * @param ownerId The owner identifier for the {@link EventProcessor} this load balancer is associated with. * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before * assuming the owner of the partition is inactive. * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions * that this {@link EventProcessor} is processing. 
*/ public PartitionBasedLoadBalancer(final PartitionManager partitionManager, final EventHubAsyncClient eventHubAsyncClient, final String eventHubName, final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager) { this.partitionManager = partitionManager; this.eventHubAsyncClient = eventHubAsyncClient; this.eventHubName = eventHubName; this.consumerGroupName = consumerGroupName; this.ownerId = ownerId; this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds; this.partitionPumpManager = partitionPumpManager; } /** * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning <b>at * most one</b> new partition. * <p> * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active * EventProcessor.Given that each invocation to this method results in ownership claim of at most one partition, * this algorithm converges gradually towards a steady state. * </p> * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an * {@link EventHubAsyncConsumer} for processing events from that partition. */ public void loadBalance() { /* * Retrieve current partition ownership details from the datastore. */ final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager .listOwnership(eventHubName, consumerGroupName) .timeout(Duration.ofSeconds(1)) .collectMap(PartitionOwnership::partitionId, Function.identity()); /* * Retrieve the list of partition ids from the Event Hub. 
*/ final Mono<List<String>> partitionsMono = eventHubAsyncClient .getPartitionIds() .timeout(Duration.ofSeconds(1)) .collectList(); Mono.zip(partitionOwnershipMono, partitionsMono) .map(this::loadBalance) .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage())) .subscribe(); } /* * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the * current Event Processor should take on the responsibility of processing more partitions. */ /* * Check if partition ownership data is valid before proceeding with load balancing. */ private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap.values() .stream() .noneMatch(partitionOwnership -> { return partitionOwnership.ownerId() == null || partitionOwnership.eventHubName() == null || !partitionOwnership.eventHubName().equals(this.eventHubName) || partitionOwnership.consumerGroupName() == null || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName) || partitionOwnership.partitionId() == null || partitionOwnership.lastModifiedTime() == null || partitionOwnership.eTag() == null; }); } /* * Find the event processor that owns the maximum number of partitions and steal a random partition * from it. */ private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) { List<PartitionOwnership> maxList = ownerPartitionMap.values() .stream() .max(Comparator.comparingInt(List::size)) .get(); return maxList.get(RANDOM.nextInt(maxList.size())).partitionId(); } /* * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor} * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional * partition. 
*/ private boolean isLoadBalanced(final int minPartitionsPerEventProcessor, final int numberOfEventProcessorsWithAdditionalPartition, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int count = 0; for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) { int numberOfPartitions = partitionOwnership.size(); if (numberOfPartitions < minPartitionsPerEventProcessor || numberOfPartitions > minPartitionsPerEventProcessor + 1) { return false; } if (numberOfPartitions == minPartitionsPerEventProcessor + 1) { count++; } } return count == numberOfEventProcessorsWithAdditionalPartition; } /* * This method is called after determining that the load is not balanced. This method will evaluate * if the current event processor should own more partitions. Specifically, this method returns true if the * current event processor owns less than the minimum number of partitions or if it owns the minimum number * and no other event processor owns lesser number of partitions than this event processor. */ private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size(); int leastPartitionsOwnedByAnyEventProcessor = ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size(); return numberOfPartitionsOwned < minPartitionsPerEventProcessor || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor; } /* * This method will create a new map of partition id and PartitionOwnership containing only those partitions * that are actively owned. All entries in the original map returned by PartitionManager that haven't been * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by * dead event processors. These will not be included in the map returned by this method. 
*/ private Map<String, PartitionOwnership> removeInactivePartitionOwnerships( final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap .entrySet() .stream() .filter(entry -> { return System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS .toMillis(inactiveTimeLimitInSeconds); }).collect(Collectors.toMap(Entry::getKey, Entry::getValue)); } private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap, partitionIdToClaim); partitionManager .claimOwnership(ownershipRequest) .timeout(Duration.ofSeconds(1)) .doOnError(ex -> logger .warning("Failed to claim ownership of partition {} - {}", ownershipRequest.partitionId(), ex.getMessage(), ex)) .subscribe(partitionPumpManager::startPartitionPump); } private PartitionOwnership createPartitionOwnershipRequest( final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim); PartitionOwnership partitionOwnershipRequest = new PartitionOwnership() .ownerId(this.ownerId) .partitionId(partitionIdToClaim) .consumerGroupName(this.consumerGroupName) .eventHubName(this.eventHubName) .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber()) .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset()) .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag()) .ownerLevel(0L); return partitionOwnershipRequest; } }
/**
 * Balances the load of processing events across all active {@link EventProcessor} instances sharing the same
 * Event Hub and consumer group. Each invocation of {@link #loadBalance()} claims at most one additional
 * partition, so partition ownership converges gradually towards an even distribution.
 */
class PartitionBasedLoadBalancer {

    private static final Random RANDOM = new Random();
    private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class);
    private final String eventHubName;
    private final String consumerGroupName;
    private final PartitionManager partitionManager;
    private final EventHubAsyncClient eventHubAsyncClient;
    private final String ownerId;
    private final long inactiveTimeLimitInSeconds;
    private final PartitionPumpManager partitionPumpManager;

    /**
     * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group.
     *
     * @param partitionManager The partition manager that this load balancer will use to read/update ownership details.
     * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events.
     * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with.
     * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with.
     * @param ownerId The identifier of the {@link EventProcessor} that owns this load balancer.
     * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before
     * assuming the owner of the partition is inactive.
     * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions
     * that this {@link EventProcessor} is processing.
     */
    public PartitionBasedLoadBalancer(final PartitionManager partitionManager,
        final EventHubAsyncClient eventHubAsyncClient, final String eventHubName,
        final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds,
        final PartitionPumpManager partitionPumpManager) {
        this.partitionManager = partitionManager;
        this.eventHubAsyncClient = eventHubAsyncClient;
        this.eventHubName = eventHubName;
        this.consumerGroupName = consumerGroupName;
        this.ownerId = ownerId;
        this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds;
        this.partitionPumpManager = partitionPumpManager;
    }

    /**
     * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link
     * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning <b>at
     * most one</b> new partition.
     * <p>
     * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active
     * EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition,
     * this algorithm converges gradually towards a steady state.
     * </p>
     * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an
     * {@link EventHubAsyncConsumer} for processing events from that partition.
     */
    public void loadBalance() {
        /*
         * Retrieve current partition ownership details from the datastore.
         */
        final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager
            .listOwnership(eventHubName, consumerGroupName)
            .timeout(Duration.ofSeconds(1))
            .collectMap(PartitionOwnership::partitionId, Function.identity());

        /*
         * Retrieve the list of partition ids from the Event Hub.
         */
        final Mono<List<String>> partitionsMono = eventHubAsyncClient
            .getPartitionIds()
            .timeout(Duration.ofSeconds(1))
            .collectList();

        // flatMap (not map) so the deferred Mono returned by loadBalance(Tuple2) is actually subscribed and
        // its errors reach doOnError below.
        Mono.zip(partitionOwnershipMono, partitionsMono)
            .flatMap(this::loadBalance)
            .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage()))
            .subscribe();
    }

    /*
     * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the
     * current Event Processor should take on the responsibility of processing more partitions.
     */
    /*
     * Check if partition ownership data is valid before proceeding with load balancing. A null ownerId is
     * intentionally allowed: a relinquished partition has no owner and is filtered out later by
     * removeInactivePartitionOwnerships().
     */
    private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) {
        return partitionOwnershipMap.values()
            .stream()
            .noneMatch(partitionOwnership -> {
                return partitionOwnership.eventHubName() == null
                    || !partitionOwnership.eventHubName().equals(this.eventHubName)
                    || partitionOwnership.consumerGroupName() == null
                    || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName)
                    || partitionOwnership.partitionId() == null
                    || partitionOwnership.lastModifiedTime() == null
                    || partitionOwnership.eTag() == null;
            });
    }

    /*
     * Find the event processor that owns the maximum number of partitions and steal a random partition
     * from it.
     */
    private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet()
            .stream()
            .max(Comparator.comparingInt(entry -> entry.getValue().size()))
            .get();
        int numberOfPartitions = ownerWithMaxPartitions.getValue().size();
        logger.info("Owner id {} owns {} partitions, stealing a partition from it",
            ownerWithMaxPartitions.getKey(), numberOfPartitions);
        return ownerWithMaxPartitions.getValue().get(RANDOM.nextInt(numberOfPartitions)).partitionId();
    }

    /*
     * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor}
     * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional
     * partition.
     */
    private boolean isLoadBalanced(final int minPartitionsPerEventProcessor,
        final int numberOfEventProcessorsWithAdditionalPartition,
        final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        int count = 0;
        for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) {
            int numberOfPartitions = partitionOwnership.size();
            if (numberOfPartitions < minPartitionsPerEventProcessor
                || numberOfPartitions > minPartitionsPerEventProcessor + 1) {
                return false;
            }
            if (numberOfPartitions == minPartitionsPerEventProcessor + 1) {
                count++;
            }
        }
        return count == numberOfEventProcessorsWithAdditionalPartition;
    }

    /*
     * This method is called after determining that the load is not balanced. This method will evaluate
     * if the current event processor should own more partitions. Specifically, this method returns true if the
     * current event processor owns less than the minimum number of partitions or if it owns the minimum number
     * and no other event processor owns lesser number of partitions than this event processor.
     */
    private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor,
        final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size();
        int leastPartitionsOwnedByAnyEventProcessor =
            ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size();
        return numberOfPartitionsOwned < minPartitionsPerEventProcessor
            || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor;
    }

    /*
     * This method will create a new map of partition id and PartitionOwnership containing only those partitions
     * that are actively owned. All entries in the original map returned by PartitionManager that haven't been
     * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by
     * dead event processors. These will not be included in the map returned by this method. Records with a
     * null/empty owner id (relinquished partitions) are also excluded.
     */
    private Map<String, PartitionOwnership> removeInactivePartitionOwnerships(
        final Map<String, PartitionOwnership> partitionOwnershipMap) {
        return partitionOwnershipMap
            .entrySet()
            .stream()
            .filter(entry -> {
                return (System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS
                    .toMillis(inactiveTimeLimitInSeconds)) && !ImplUtils.isNullOrEmpty(entry.getValue().ownerId());
            }).collect(Collectors.toMap(Entry::getKey, Entry::getValue));
    }

    /*
     * Attempts to claim ownership of the given partition and, on success, starts a partition pump for it.
     * Failures are logged but not propagated; the next load-balance cycle will retry.
     */
    private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap,
        final String partitionIdToClaim) {
        logger.info("Attempting to claim ownership of partition {}", partitionIdToClaim);
        PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap,
            partitionIdToClaim);
        partitionManager
            .claimOwnership(ownershipRequest)
            .timeout(Duration.ofSeconds(1))
            .doOnNext(partitionOwnership -> logger.info("Successfully claimed ownership of partition {}",
                partitionOwnership.partitionId()))
            .doOnError(ex -> logger
                .warning("Failed to claim ownership of partition {} - {}",
                    ownershipRequest.partitionId(), ex.getMessage(), ex))
            .subscribe(partitionPumpManager::startPartitionPump);
    }

    /*
     * Builds the ownership record used to (re)claim a partition, carrying over checkpoint details
     * (sequence number, offset, eTag) from the previous owner when present.
     */
    private PartitionOwnership createPartitionOwnershipRequest(
        final Map<String, PartitionOwnership> partitionOwnershipMap,
        final String partitionIdToClaim) {
        PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim);
        PartitionOwnership partitionOwnershipRequest = new PartitionOwnership()
            .ownerId(this.ownerId)
            .partitionId(partitionIdToClaim)
            .consumerGroupName(this.consumerGroupName)
            .eventHubName(this.eventHubName)
            .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber())
            .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset())
            .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag())
            .ownerLevel(0L);
        return partitionOwnershipRequest;
    }
}
Let's say we have an Event Hub with 3 partitions and we follow the steps below: * Create an Event Processor instance `processor1` * Call `processor1.start()` * After some delay, call `processor1.stop()` * Now `processor1` owns all 3 partitions and the load is balanced * If we call `processor1.start()` again, `isLoadBalanced()` will return true, and if we return `Mono.empty()` here, it means we will be stuck in this state forever (if I understood correctly)? I think in this case we should create new pumps and start receiving events again from all the partitions.
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) { Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1(); List<String> partitionIds = tuple.getT2(); if (ImplUtils.isNullOrEmpty(partitionIds)) { return Mono.error(new IllegalStateException("There are no partitions in Event Hub " + this.eventHubName)); } if (!isValid(partitionOwnershipMap)) { return Mono.error(new IllegalStateException("Invalid partitionOwnership data from PartitionManager")); } /* * Remove all partitions' ownership that have not be modified for a configuration period of time. This means * that the previous EventProcessor that owned the partition is probably down and the partition is now eligible * to be claimed by other EventProcessors. */ Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships( partitionOwnershipMap); int numberOfPartitions = partitionIds.size(); if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) { /* * If the active partition ownership map is empty, this is the first time an event processor is * running or all Event Processors are down for this Event Hub, consumer group combination. All * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership. */ claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(numberOfPartitions))); return Mono.empty(); } /* * Create a map of owner id and a list of partitions it owns */ Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values() .stream() .collect( Collectors.groupingBy(PartitionOwnership::ownerId, mapping(Function.identity(), toList()))); ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>()); /* * Find the minimum number of partitions every event processor should own when the load is * evenly distributed. 
*/ int numberOfActiveEventProcessors = ownerPartitionMap.size(); int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors; /* * If the number of partitions in Event Hub is not evenly divisible by number of active event processors, * a few Event Processors may own 1 additional partition than the minimum when the load is balanced. Calculate * the number of event processors that can own additional partition. */ int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors; if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition, ownerPartitionMap)) { return Mono.empty(); } if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) { return Mono.empty(); } /* * If some partitions are unclaimed, this could be because an event processor is down and * it's partitions are now available for others to own or because event processors are just * starting up and gradually claiming partitions to own or new partitions were added to Event Hub. * Find any partition that is not actively owned and claim it. * * OR * * Find a partition to steal from another event processor. Pick the event processor that has owns the highest * number of partitions. */ String partitionToClaim = partitionIds.parallelStream() .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId)) .findAny() .orElseGet(() -> findPartitionToSteal(ownerPartitionMap)); claimOwnership(partitionOwnershipMap, partitionToClaim); return Mono.empty(); }
return Mono.empty();
    /*
     * Works with the given partition ownership details and Event Hub partitions to evaluate whether the current
     * event processor should take on the responsibility of processing more partitions, claiming at most one
     * partition per invocation. All work is deferred inside Mono.fromRunnable until subscription.
     *
     * NOTE(review): if this processor restarts while the store still shows it owning all partitions (records not
     * yet expired), isLoadBalanced() returns true and no partition pump is started for the partitions it
     * nominally owns — presumably pumps should be (re)started for owned partitions in that case; TODO confirm.
     */
    private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) {
        return Mono.fromRunnable(() -> {
            logger.info("Starting load balancer");
            Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
            List<String> partitionIds = tuple.getT2();

            if (ImplUtils.isNullOrEmpty(partitionIds)) {
                throw Exceptions
                    .propagate(new IllegalStateException("There are no partitions in Event Hub "
                        + this.eventHubName));
            }

            int numberOfPartitions = partitionIds.size();
            logger.info("Partition manager returned {} ownership records", partitionOwnershipMap.size());
            logger.info("EventHubAsyncClient returned {} partitions", numberOfPartitions);

            if (!isValid(partitionOwnershipMap)) {
                throw Exceptions
                    .propagate(new IllegalStateException("Invalid partitionOwnership data from PartitionManager"));
            }

            /*
             * Remove all partitions' ownership that have not been modified for a configured period of time. This
             * means that the previous EventProcessor that owned the partition is probably down and the partition
             * is now eligible to be claimed by other EventProcessors.
             */
            Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships(
                partitionOwnershipMap);
            logger.info("Number of active ownership records {}", activePartitionOwnershipMap.size());

            if (ImplUtils.isNullOrEmpty(activePartitionOwnershipMap)) {
                /*
                 * If the active partition ownership map is empty, this is the first time an event processor is
                 * running or all Event Processors are down for this Event Hub, consumer group combination. All
                 * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership.
                 */
                claimOwnership(partitionOwnershipMap, partitionIds.get(RANDOM.nextInt(numberOfPartitions)));
                return;
            }

            /*
             * Create a map of owner id and a list of partitions it owns.
             */
            Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values()
                .stream()
                .collect(Collectors.groupingBy(PartitionOwnership::ownerId,
                    mapping(Function.identity(), toList())));
            // Include this processor even if it currently owns nothing, so it counts as an active processor.
            ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>());

            /*
             * Find the minimum number of partitions every event processor should own when the load is
             * evenly distributed.
             */
            int numberOfActiveEventProcessors = ownerPartitionMap.size();
            logger.info("Number of active event processors {}", ownerPartitionMap.size());
            int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors;

            /*
             * If the number of partitions in the Event Hub is not evenly divisible by the number of active event
             * processors, a few event processors may own 1 partition more than the minimum when the load is
             * balanced. Calculate the number of event processors that can own an additional partition.
             */
            int numberOfEventProcessorsWithAdditionalPartition =
                numberOfPartitions % numberOfActiveEventProcessors;
            logger.info("Expected min partitions per event processor = {}, expected number of event "
                + "processors with additional partition = {}", minPartitionsPerEventProcessor,
                numberOfEventProcessorsWithAdditionalPartition);

            if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition,
                ownerPartitionMap)) {
                logger.info("Load is balanced");
                return;
            }

            if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) {
                logger.info("This event processor owns {} partitions and shouldn't own more",
                    ownerPartitionMap.get(ownerId).size());
                return;
            }

            logger.info(
                "Load is unbalanced and this event processor should own more partitions");

            /*
             * If some partitions are unclaimed, this could be because an event processor is down and its
             * partitions are now available for others to own, or because event processors are just starting up
             * and gradually claiming partitions to own, or new partitions were added to the Event Hub. Find any
             * partition that is not actively owned and claim it.
             *
             * OR
             *
             * Find a partition to steal from another event processor. Pick the event processor that owns the
             * highest number of partitions.
             */
            String partitionToClaim = partitionIds.parallelStream()
                .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId))
                .findAny()
                .orElseGet(() -> {
                    logger.info("No unclaimed partitions, stealing from another event processor");
                    return findPartitionToSteal(ownerPartitionMap);
                });

            claimOwnership(partitionOwnershipMap, partitionToClaim);
        });
    }
class PartitionBasedLoadBalancer { private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class); private final String eventHubName; private final String consumerGroupName; private final PartitionManager partitionManager; private final EventHubAsyncClient eventHubAsyncClient; private final String ownerId; private final long inactiveTimeLimitInSeconds; private final PartitionPumpManager partitionPumpManager; /** * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group. * * @param partitionManager The partition manager that this load balancer will use to read/update ownership details. * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events. * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with. * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with. * @param ownerId The owner identifier for the {@link EventProcessor} this load balancer is associated with. * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before * assuming the owner of the partition is inactive. * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions * that this {@link EventProcessor} is processing. 
*/ public PartitionBasedLoadBalancer(final PartitionManager partitionManager, final EventHubAsyncClient eventHubAsyncClient, final String eventHubName, final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager) { this.partitionManager = partitionManager; this.eventHubAsyncClient = eventHubAsyncClient; this.eventHubName = eventHubName; this.consumerGroupName = consumerGroupName; this.ownerId = ownerId; this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds; this.partitionPumpManager = partitionPumpManager; } /** * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning <b>at * most one</b> new partition. * <p> * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active * EventProcessor.Given that each invocation to this method results in ownership claim of at most one partition, * this algorithm converges gradually towards a steady state. * </p> * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an * {@link EventHubAsyncConsumer} for processing events from that partition. */ public void loadBalance() { /* * Retrieve current partition ownership details from the datastore. */ final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager .listOwnership(eventHubName, consumerGroupName) .timeout(Duration.ofSeconds(1)) .collectMap(PartitionOwnership::partitionId, Function.identity()); /* * Retrieve the list of partition ids from the Event Hub. 
*/ final Mono<List<String>> partitionsMono = eventHubAsyncClient .getPartitionIds() .timeout(Duration.ofSeconds(1)) .collectList(); Mono.zip(partitionOwnershipMono, partitionsMono) .map(this::loadBalance) .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage())) .subscribe(); } /* * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the * current Event Processor should take on the responsibility of processing more partitions. */ /* * Check if partition ownership data is valid before proceeding with load balancing. */ private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap.values() .stream() .noneMatch(partitionOwnership -> { return partitionOwnership.ownerId() == null || partitionOwnership.eventHubName() == null || !partitionOwnership.eventHubName().equals(this.eventHubName) || partitionOwnership.consumerGroupName() == null || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName) || partitionOwnership.partitionId() == null || partitionOwnership.lastModifiedTime() == null || partitionOwnership.eTag() == null; }); } /* * Find the event processor that owns the maximum number of partitions and steal a random partition * from it. */ private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) { List<PartitionOwnership> maxList = ownerPartitionMap.values() .stream() .max(Comparator.comparingInt(List::size)) .get(); return maxList.get(RANDOM.nextInt(maxList.size())).partitionId(); } /* * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor} * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional * partition. 
*/ private boolean isLoadBalanced(final int minPartitionsPerEventProcessor, final int numberOfEventProcessorsWithAdditionalPartition, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int count = 0; for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) { int numberOfPartitions = partitionOwnership.size(); if (numberOfPartitions < minPartitionsPerEventProcessor || numberOfPartitions > minPartitionsPerEventProcessor + 1) { return false; } if (numberOfPartitions == minPartitionsPerEventProcessor + 1) { count++; } } return count == numberOfEventProcessorsWithAdditionalPartition; } /* * This method is called after determining that the load is not balanced. This method will evaluate * if the current event processor should own more partitions. Specifically, this method returns true if the * current event processor owns less than the minimum number of partitions or if it owns the minimum number * and no other event processor owns lesser number of partitions than this event processor. */ private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size(); int leastPartitionsOwnedByAnyEventProcessor = ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size(); return numberOfPartitionsOwned < minPartitionsPerEventProcessor || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor; } /* * This method will create a new map of partition id and PartitionOwnership containing only those partitions * that are actively owned. All entries in the original map returned by PartitionManager that haven't been * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by * dead event processors. These will not be included in the map returned by this method. 
*/ private Map<String, PartitionOwnership> removeInactivePartitionOwnerships( final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap .entrySet() .stream() .filter(entry -> { return System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS .toMillis(inactiveTimeLimitInSeconds); }).collect(Collectors.toMap(Entry::getKey, Entry::getValue)); } private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap, partitionIdToClaim); partitionManager .claimOwnership(ownershipRequest) .timeout(Duration.ofSeconds(1)) .doOnError(ex -> logger .warning("Failed to claim ownership of partition {} - {}", ownershipRequest.partitionId(), ex.getMessage(), ex)) .subscribe(partitionPumpManager::startPartitionPump); } private PartitionOwnership createPartitionOwnershipRequest( final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim); PartitionOwnership partitionOwnershipRequest = new PartitionOwnership() .ownerId(this.ownerId) .partitionId(partitionIdToClaim) .consumerGroupName(this.consumerGroupName) .eventHubName(this.eventHubName) .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber()) .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset()) .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag()) .ownerLevel(0L); return partitionOwnershipRequest; } }
/*
 * Distributes Event Hub partitions among competing EventProcessor instances. Ownership is coordinated
 * through records persisted via the PartitionManager; each invocation of loadBalance() claims at most
 * one additional partition, so the fleet converges gradually toward a balanced state.
 */
class PartitionBasedLoadBalancer {
    // Shared randomness for picking which partition to steal from the most-loaded owner.
    private static final Random RANDOM = new Random();
    private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class);
    // Event Hub and consumer group whose partitions are being balanced.
    private final String eventHubName;
    private final String consumerGroupName;
    // Datastore-backed store of partition ownership records.
    private final PartitionManager partitionManager;
    private final EventHubAsyncClient eventHubAsyncClient;
    // Identity of the EventProcessor this balancer claims partitions for.
    private final String ownerId;
    // Ownership records not updated within this many seconds are treated as abandoned.
    private final long inactiveTimeLimitInSeconds;
    private final PartitionPumpManager partitionPumpManager;

    /**
     * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group.
     *
     * @param partitionManager The partition manager that this load balancer will use to read/update ownership
     * details.
     * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events.
     * @param eventHubName The Event Hub name the {@link EventProcessor} is associated with.
     * @param consumerGroupName The consumer group name the {@link EventProcessor} is associated with.
     * @param ownerId The identifier of the {@link EventProcessor} that owns this load balancer.
     * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before
     * assuming the owner of the partition is inactive.
     * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and
     * partitions that this {@link EventProcessor} is processing.
     */
    public PartitionBasedLoadBalancer(final PartitionManager partitionManager,
        final EventHubAsyncClient eventHubAsyncClient, final String eventHubName,
        final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds,
        final PartitionPumpManager partitionPumpManager) {
        this.partitionManager = partitionManager;
        this.eventHubAsyncClient = eventHubAsyncClient;
        this.eventHubName = eventHubName;
        this.consumerGroupName = consumerGroupName;
        this.ownerId = ownerId;
        this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds;
        this.partitionPumpManager = partitionPumpManager;
    }

    /**
     * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link
     * EventProcessor} periodically. Every call to this method will result in this {@link EventProcessor} owning
     * <b>at most one</b> new partition.
     * <p>
     * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active
     * EventProcessor. Given that each invocation to this method results in ownership claim of at most one
     * partition, this algorithm converges gradually towards a steady state.
     * </p>
     * When a new partition is claimed, this method is also responsible for starting a partition pump that creates
     * an {@link EventHubAsyncConsumer} for processing events from that partition.
     */
    public void loadBalance() {
        /*
         * Retrieve current partition ownership details from the datastore.
         */
        final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = partitionManager
            .listOwnership(eventHubName, consumerGroupName)
            .timeout(Duration.ofSeconds(1))
            .collectMap(PartitionOwnership::partitionId, Function.identity());

        /*
         * Retrieve the list of partition ids from the Event Hub.
         */
        final Mono<List<String>> partitionsMono = eventHubAsyncClient
            .getPartitionIds()
            .timeout(Duration.ofSeconds(1))
            .collectList();

        Mono.zip(partitionOwnershipMono, partitionsMono)
            .flatMap(this::loadBalance)
            .doOnError(ex -> logger.warning("Load balancing for event processor failed - {}", ex.getMessage()))
            .subscribe();
    }

    /*
     * This method works with the given partition ownership details and Event Hub partitions to evaluate whether
     * the current Event Processor should take on the responsibility of processing more partitions.
     *
     * NOTE(review): the private loadBalance(Tuple2) overload referenced by this::loadBalance above is not
     * visible in this chunk — confirm it exists in the full file.
     */

    /*
     * Check if partition ownership data is valid before proceeding with load balancing.
     */
    private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) {
        // A single malformed record (wrong hub/group, or missing id, timestamp, or eTag) invalidates the set.
        return partitionOwnershipMap.values()
            .stream()
            .noneMatch(partitionOwnership -> {
                return partitionOwnership.eventHubName() == null
                    || !partitionOwnership.eventHubName().equals(this.eventHubName)
                    || partitionOwnership.consumerGroupName() == null
                    || !partitionOwnership.consumerGroupName().equals(this.consumerGroupName)
                    || partitionOwnership.partitionId() == null
                    || partitionOwnership.lastModifiedTime() == null
                    || partitionOwnership.eTag() == null;
            });
    }

    /*
     * Find the event processor that owns the maximum number of partitions and steal a random partition
     * from it.
     */
    private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet()
            .stream()
            .max(Comparator.comparingInt(entry -> entry.getValue().size()))
            .get();
        int numberOfPartitions = ownerWithMaxPartitions.getValue().size();
        logger.info("Owner id {} owns {} partitions, stealing a partition from it",
            ownerWithMaxPartitions.getKey(), numberOfPartitions);
        return ownerWithMaxPartitions.getValue().get(RANDOM.nextInt(numberOfPartitions)).partitionId();
    }

    /*
     * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor}
     * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional
     * partition.
     */
    private boolean isLoadBalanced(final int minPartitionsPerEventProcessor,
        final int numberOfEventProcessorsWithAdditionalPartition,
        final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        int count = 0;
        for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) {
            int numberOfPartitions = partitionOwnership.size();
            // Any owner below the minimum or above minimum + 1 means the load is not balanced.
            if (numberOfPartitions < minPartitionsPerEventProcessor
                || numberOfPartitions > minPartitionsPerEventProcessor + 1) {
                return false;
            }
            // Count owners carrying exactly one extra partition.
            if (numberOfPartitions == minPartitionsPerEventProcessor + 1) {
                count++;
            }
        }
        return count == numberOfEventProcessorsWithAdditionalPartition;
    }

    /*
     * This method is called after determining that the load is not balanced. This method will evaluate
     * if the current event processor should own more partitions. Specifically, this method returns true if the
     * current event processor owns less than the minimum number of partitions or if it owns the minimum number
     * and no other event processor owns lesser number of partitions than this event processor.
     */
    private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor,
        final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size();
        // NOTE(review): assumes ownerPartitionMap is non-empty and contains this.ownerId — confirm at call sites.
        int leastPartitionsOwnedByAnyEventProcessor =
            ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size();
        return numberOfPartitionsOwned < minPartitionsPerEventProcessor
            || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor;
    }

    /*
     * This method will create a new map of partition id and PartitionOwnership containing only those partitions
     * that are actively owned. All entries in the original map returned by PartitionManager that haven't been
     * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by
     * dead event processors. These will not be included in the map returned by this method.
     */
    private Map<String, PartitionOwnership> removeInactivePartitionOwnerships(
        final Map<String, PartitionOwnership> partitionOwnershipMap) {
        return partitionOwnershipMap
            .entrySet()
            .stream()
            .filter(entry -> {
                // Keep only records that are both fresh and carry a non-empty owner id.
                return (System.currentTimeMillis() - entry.getValue().lastModifiedTime() < TimeUnit.SECONDS
                    .toMillis(inactiveTimeLimitInSeconds)) && !ImplUtils.isNullOrEmpty(entry.getValue().ownerId());
            }).collect(Collectors.toMap(Entry::getKey, Entry::getValue));
    }

    /*
     * Attempts to claim ownership of a partition and, on success, hands the new ownership record to the
     * partition pump manager to start pumping events from that partition.
     */
    private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap,
        final String partitionIdToClaim) {
        logger.info("Attempting to claim ownership of partition {}", partitionIdToClaim);
        PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap,
            partitionIdToClaim);
        partitionManager
            .claimOwnership(ownershipRequest)
            .timeout(Duration.ofSeconds(1))
            .doOnNext(partitionOwnership -> logger.info("Successfully claimed ownership of partition {}",
                partitionOwnership.partitionId()))
            // Losing a claim (e.g. to a competing processor) is expected; log a warning and continue.
            .doOnError(ex -> logger
                .warning("Failed to claim ownership of partition {} - {}",
                    ownershipRequest.partitionId(), ex.getMessage(), ex))
            .subscribe(partitionPumpManager::startPartitionPump);
    }

    /*
     * Builds the ownership request for a partition, carrying forward checkpoint state (sequence number,
     * offset) and the eTag from the previous ownership record, if any.
     */
    private PartitionOwnership createPartitionOwnershipRequest(
        final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) {
        PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim);
        PartitionOwnership partitionOwnershipRequest = new PartitionOwnership()
            .ownerId(this.ownerId)
            .partitionId(partitionIdToClaim)
            .consumerGroupName(this.consumerGroupName)
            .eventHubName(this.eventHubName)
            .sequenceNumber(previousPartitionOwnership == null ? null : previousPartitionOwnership.sequenceNumber())
            .offset(previousPartitionOwnership == null ? null : previousPartitionOwnership.offset())
            .eTag(previousPartitionOwnership == null ? null : previousPartitionOwnership.eTag())
            .ownerLevel(0L);
        return partitionOwnershipRequest;
    }
}
I think the `CLEAR` enum should be used, and the statement about clearing a range of at most 4 MB should be removed — a clear is allowed to be as large as the file size. Additionally, clearing will throw an exception if the Content-Length header is set.
/**
 * Clears a range of bytes from the beginning of the file. Clear performs an in-place write on the
 * specified file.
 *
 * @param length The number of bytes to clear, starting at offset 0. Unlike an upload, a clear may
 * span up to the full size of the file.
 * @return A response containing the {@link FileUploadInfo file upload info} headers and the
 * response status code.
 */
public Mono<Response<FileUploadInfo>> clearRange(long length) {
    FileRange range = new FileRange(0, length - 1);
    // A clear must use the CLEAR write type with no request body: the service rejects a clear
    // request that carries content, so the body is null and the content length is 0.
    return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR, 0, null, null, null, Context.NONE)
        .map(this::uploadResponse);
}
return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, 0, null, null, null, Context.NONE)
new FileRange(0, length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, context) .map(this::uploadResponse); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" starting from 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload
class FileAsyncClient { private final ClientLogger logger = new ClientLogger(FileAsyncClient.class); private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L; private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300; private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String filePath; private final String snapshot; /** * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param filePath Path to the file * @param snapshot The snapshot of the share */ FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) { this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Creates a FileAsyncClient that sends requests to the storage account at {@code endpoint}. Each service call goes * through the {@code httpPipeline}. * * @param endpoint URL for the Storage File service * @param httpPipeline HttpPipeline that HTTP requests and response flow through * @param shareName Name of the share * @param filePath Path to the file * @param snapshot Optional snapshot of the share */ FileAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String filePath, String snapshot) { this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * Get the url of the storage file client. * * @return the URL of the storage file client * @throws RuntimeException If the file is using a malformed URL. 
*/ public URL getFileUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with size 1KB.</p> * * {@codesnippet com.azure.storage.file.fileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return A response containing the file info and the status of creating the file. * @throws StorageErrorException If the file has already existed, the parent directory does not exist or fileName is * an invalid resource name. */ public Mono<Response<FileInfo>> create(long maxSize) { return create(maxSize, null, null); } /** * Creates a file in the storage account and returns a response of FileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers and metadata.</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.create * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders Additional parameters for the operation. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the * naming rules. * @return A response containing the directory info and the status of creating the directory. * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or * directory is an invalid resource name. 
* @see <a href="https: */ public Mono<Response<FileInfo>> create(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "now"; String fileLastWriteTime = "now"; return azureFileStorageClient.files().createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime, fileLastWriteTime, null, metadata, filePermission, null, httpHeaders, Context.NONE) .map(this::createFileInfoResponse); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the * naming rules. * @return A response containing the file copy info and the status of copying the file. * @see <a href="https: */ public Mono<Response<FileCopyInfo>> startCopy(String sourceUrl, Map<String, String> metadata) { return azureFileStorageClient.files().startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, Context.NONE) .map(this::startCopyResponse); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return A response containing the status of aborting copy the file. 
*/ public Mono<VoidResponse> abortCopy(String copyId) { return azureFileStorageClient.files().abortCopyWithRestResponseAsync(shareName, filePath, copyId, Context.NONE) .map(VoidResponse::new); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath) { return downloadToFile(downloadFilePath, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath, FileRange range) { AsynchronousFileChannel channel = channelSetup(downloadFilePath); return sliceFileRange(range) .flatMap(chunk -> downloadWithProperties(chunk, false) .map(dar -> dar.value().body()) .subscribeOn(Schedulers.elastic()) .flatMap(fbb -> FluxUtil.bytebufStreamToFile(fbb, channel, chunk.start() - (range == null ? 
0 : range.start())) .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))) .then() .doOnTerminate(() -> channelCleanUp(channel)); } private AsynchronousFileChannel channelSetup(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private void channelCleanUp(AsynchronousFileChannel channel) { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Flux<FileRange> sliceFileRange(FileRange fileRange) { long offset = fileRange == null ? 0L : fileRange.start(); Mono<Long> end; if (fileRange != null) { end = Mono.just(fileRange.end()); } else { end = Mono.empty(); } end = end.switchIfEmpty(getProperties().map(rb -> rb.value().contentLength())); return end .map(e -> { List<FileRange> chunks = new ArrayList<>(); for (long pos = offset; pos < e; pos += FILE_DEFAULT_BLOCK_SIZE) { long count = FILE_DEFAULT_BLOCK_SIZE; if (pos + count > e) { count = e - pos; } chunks.add(new FileRange(pos, pos + count - 1)); } return chunks; }) .flatMapMany(Flux::fromIterable); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. 
</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithProperties() { return downloadWithProperties(null, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to * true, as long as the range is less than or equal to 4 MB in size. * @return A response that only contains headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithProperties(FileRange range, Boolean rangeGetContentMD5) { String rangeString = range == null ? null : range.toString(); return azureFileStorageClient.files().downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, Context.NONE) .map(this::downloadWithPropertiesResponse); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. 
*/ public Mono<VoidResponse> delete() { return azureFileStorageClient.files().deleteWithRestResponseAsync(shareName, filePath, Context.NONE) .map(VoidResponse::new); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return Storage file properties */ public Mono<Response<FileProperties>> getProperties() { return azureFileStorageClient.files().getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, Context.NONE) .map(this::getPropertiesResponse); } /** * Sets the user-defined httpHeaders to associate to the file. * * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current * size of the file, then all ranges above the specified byte value are cleared. * @return Response of the information about the file * @throws IllegalArgumentException thrown if parameters fail the validation. 
*/ public Mono<Response<FileInfo>> setHttpHeaders(long newFileSize, FileHTTPHeaders httpHeaders) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "preserve"; String fileLastWriteTime = "preserve"; return azureFileStorageClient.files().setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime, fileLastWriteTime, null, newFileSize, filePermission, null, httpHeaders, Context.NONE) .map(this::setHttpHeadersResponse); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return information about the file * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<Response<FileMetadataInfo>> setMetadata(Map<String, String> metadata) { return azureFileStorageClient.files().setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, Context.NONE) .map(this::setMetadataResponse); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. 
* @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> upload(Flux<ByteBuf> data, long length) { FileRange range = * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> upload(Flux<ByteBuf> data, long length, long offset) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, Context.NONE) .map(this::uploadResponse); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range from 0 bytes with length of 1024. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. 
* @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to clear a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> clearRange(long length) { FileRange range = new FileRange(0, length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, 0, null, null, null, Context.NONE) .map(this::uploadResponse); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to clear a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> clearRange(long length, long offset) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, 0, null, null, null, Context.NONE) .map(this::uploadResponse); } /** * Uploads file to storage file service. * * <p><strong>Code Samples</strong></p> * * <p> Upload the file from the source file path. </p> * * (@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile
class FileAsyncClient { private final ClientLogger logger = new ClientLogger(FileAsyncClient.class); private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L; private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300; private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String filePath; private final String snapshot; /** * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param filePath Path to the file * @param snapshot The snapshot of the share */ FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Creates a FileAsyncClient that sends requests to the storage account at {@code endpoint}. Each service call goes * through the {@code httpPipeline}. * * @param endpoint URL for the Storage File service * @param httpPipeline HttpPipeline that HTTP requests and response flow through * @param shareName Name of the share * @param filePath Path to the file * @param snapshot Optional snapshot of the share */ FileAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * Get the url of the storage file client. 
* * @return the URL of the storage file client * @throws RuntimeException If the file is using a malformed URL. */ public URL getFileUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with size 1KB.</p> * * {@codesnippet com.azure.storage.file.fileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return A response containing the file info and the status of creating the file. * @throws StorageErrorException If the file has already existed, the parent directory does not exist or fileName is * an invalid resource name. */ public Mono<FileInfo> create(long maxSize) { return createWithResponse(maxSize, null, null).flatMap(FluxUtil::toMono); } /** * Creates a file in the storage account and returns a response of FileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers and metadata.</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders Additional parameters for the operation. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return A response containing the {@link FileInfo file info} and the status of creating the file. * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or directory is an invalid resource name. 
*/ public Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata) { return withContext(context -> createWithResponse(maxSize, httpHeaders, metadata, context)); } Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "now"; String fileLastWriteTime = "now"; return azureFileStorageClient.files().createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime, fileLastWriteTime, null, metadata, filePermission, null, httpHeaders, context) .map(this::createFileInfoResponse); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return The {@link FileCopyInfo file copy info}. * @see <a href="https: */ public Mono<FileCopyInfo> startCopy(String sourceUrl, Map<String, String> metadata) { return startCopyWithResponse(sourceUrl, metadata).flatMap(FluxUtil::toMono); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. 
Metadata names must adhere to the naming rules. * @return A response containing the {@link FileCopyInfo file copy info} and the status of copying the file. * @see <a href="https: */ public Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata) { return withContext(context -> startCopyWithResponse(sourceUrl, metadata, context)); } Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata, Context context) { return azureFileStorageClient.files().startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, context) .map(this::startCopyResponse); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return An empty response. */ public Mono<Void> abortCopy(String copyId) { return abortCopyWithResponse(copyId).flatMap(FluxUtil::toMono); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return A response containing the status of aborting copy the file. 
*/ public Mono<VoidResponse> abortCopyWithResponse(String copyId) { return withContext(context -> abortCopyWithResponse(copyId, context)); } Mono<VoidResponse> abortCopyWithResponse(String copyId, Context context) { return azureFileStorageClient.files().abortCopyWithRestResponseAsync(shareName, filePath, copyId, context) .map(VoidResponse::new); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath) { return downloadToFile(downloadFilePath, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath, FileRange range) { AsynchronousFileChannel channel = channelSetup(downloadFilePath); return sliceFileRange(range) .flatMap(chunk -> downloadWithPropertiesWithResponse(chunk, false) .map(dar -> dar.value().body()) .subscribeOn(Schedulers.elastic()) .flatMap(fbb -> FluxUtil.writeFile(fbb, channel, chunk.start() - (range == null ? 
0 : range.start())) .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))) .then() .doOnTerminate(() -> channelCleanUp(channel)); } private AsynchronousFileChannel channelSetup(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private void channelCleanUp(AsynchronousFileChannel channel) { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Flux<FileRange> sliceFileRange(FileRange fileRange) { long offset = fileRange == null ? 0L : fileRange.start(); Mono<Long> end; if (fileRange != null) { end = Mono.just(fileRange.end()); } else { end = Mono.empty(); } end = end.switchIfEmpty(getProperties().map(rb -> rb.contentLength())); return end .map(e -> { List<FileRange> chunks = new ArrayList<>(); for (long pos = offset; pos < e; pos += FILE_DEFAULT_BLOCK_SIZE) { long count = FILE_DEFAULT_BLOCK_SIZE; if (pos + count > e) { count = e - pos; } chunks.add(new FileRange(pos, pos + count - 1)); } return chunks; }) .flatMapMany(Flux::fromIterable); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. 
</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties} * * <p>For more information, see the * <a href="https: * * @return The {@link FileDownloadInfo file download Info} */ public Mono<FileDownloadInfo> downloadWithProperties() { return downloadWithPropertiesWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets * to true, as long as the range is less than or equal to 4 MB in size. * @return A response containing the {@link FileDownloadInfo file download Info} with headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5) { return withContext(context -> downloadWithPropertiesWithResponse(range, rangeGetContentMD5, context)); } Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5, Context context) { String rangeString = range == null ? null : range.toString(); return azureFileStorageClient.files().downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, context) .map(this::downloadWithPropertiesResponse); } /** * Deletes the file associate with the client. 
* * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} * * <p>For more information, see the * <a href="https: * @return An empty response * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<Void> delete() { return deleteWithResponse(null).flatMap(FluxUtil::toMono); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.deleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<VoidResponse> deleteWithResponse() { return withContext(context -> deleteWithResponse(context)); } Mono<VoidResponse> deleteWithResponse(Context context) { return azureFileStorageClient.files().deleteWithRestResponseAsync(shareName, filePath, context) .map(VoidResponse::new); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return {@link FileProperties Storage file properties} */ public Mono<FileProperties> getProperties() { return getPropertiesWithResponse().flatMap(FluxUtil::toMono); } /** * Retrieves the properties of the storage account's file. * The properties includes file metadata, last modified date, is server encrypted, and eTag. 
* * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getPropertiesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response containing the {@link FileProperties storage file properties} and response status code */ public Mono<Response<FileProperties>> getPropertiesWithResponse() { return withContext(context -> getPropertiesWithResponse(context)); } Mono<Response<FileProperties>> getPropertiesWithResponse(Context context) { return azureFileStorageClient.files().getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, context) .map(this::getPropertiesResponse); } /** * Sets the user-defined httpHeaders to associate to the file. * * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return The {@link FileInfo file info} * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<FileInfo> setHttpHeaders(long newFileSize, FileHTTPHeaders httpHeaders) { return setHttpHeadersWithResponse(newFileSize, httpHeaders).flatMap(FluxUtil::toMono); } /** * Sets the user-defined httpHeaders to associate to the file. 
* * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return Response containing the {@link FileInfo file info} and response status code * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders) { return withContext(context -> setHttpHeadersWithResponse(newFileSize, httpHeaders, context)); } Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "preserve"; String fileLastWriteTime = "preserve"; return azureFileStorageClient.files().setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime, fileLastWriteTime, null, newFileSize, filePermission, null, httpHeaders, context) .map(this::setHttpHeadersResponse); } /** * Sets the user-defined metadata to associate to the file. 
* * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return {@link FileMetadataInfo file meta info} * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<FileMetadataInfo> setMetadata(Map<String, String> metadata) { return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return A response containing the {@link FileMetadataInfo file meta info} and status code * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata) { return withContext(context -> setMetadataWithResponse(metadata, context)); } Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata, Context context) { return 
azureFileStorageClient.files().setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, context) .map(this::setMetadataResponse); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length) { return uploadWithResponse(data, length).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload "default" to the file. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero.. 
* @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length) { return withContext(context -> uploadWithResponse(data, length, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, Context context) { FileRange range = * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return The {@link FileUploadInfo file upload info} * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length, long offset) { return uploadWithResponse(data, length, offset).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code null} * @param length Specifies the number of bytes being transmitted in the request body. 
When the FileRangeWriteType is set to clear, the value of this header must be set to zero. * @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset) { return withContext(context -> uploadWithResponse(data, length, offset, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset, Context context) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, context) .map(this::uploadResponse); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clears the first 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared. * @return The {@link FileUploadInfo file upload info} */ public Mono<FileUploadInfo> clearRange(long length) { return clearRangeWithResponse(length, 0).flatMap(FluxUtil::toMono); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. 
* @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return A response of {@link FileUploadInfo file upload info} that only contains headers and response status code */ public Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset) { return withContext(context -> clearRangeWithResponse(length, offset, context)); } Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset, Context context) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR, 0L, null, null, null, context) .map(this::uploadResponse); } /** * Uploads file to storage file service. * * <p><strong>Code Samples</strong></p> * * <p> Upload the file from the source file path. </p> * * (@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile
Should be `FileRangeWriteType.CLEAR` (with a content length of `0L` and no request body), not `UPDATE` — this is a range-clear operation, not an upload.
/**
 * Clears {@code length} bytes of the file starting at {@code offset} via an in-place write.
 *
 * @param length Number of bytes to clear.
 * @param offset Starting point of the range to clear.
 * @return A response that only contains headers and response status code.
 */
public Mono<Response<FileUploadInfo>> clearRange(long length, long offset) {
    FileRange range = new FileRange(offset, offset + length - 1);
    // CLEAR (not UPDATE) with a zero content length and no body performs the in-place clear;
    // UPDATE is the upload write type and would be rejected without a request body.
    return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR, 0L, null, null, null, Context.NONE)
        .map(this::uploadResponse);
}
// CLEAR write type with a zero content length and no body is required for a range clear.
return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR, 0L, null, null, null, Context.NONE)
new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, context) .map(this::uploadResponse); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clears the first 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange
class FileAsyncClient { private final ClientLogger logger = new ClientLogger(FileAsyncClient.class); private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L; private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300; private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String filePath; private final String snapshot; /** * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param filePath Path to the file * @param snapshot The snapshot of the share */ FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) { this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Creates a FileAsyncClient that sends requests to the storage account at {@code endpoint}. Each service call goes * through the {@code httpPipeline}. * * @param endpoint URL for the Storage File service * @param httpPipeline HttpPipeline that HTTP requests and response flow through * @param shareName Name of the share * @param filePath Path to the file * @param snapshot Optional snapshot of the share */ FileAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String filePath, String snapshot) { this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * Get the url of the storage file client. * * @return the URL of the storage file client * @throws RuntimeException If the file is using a malformed URL. 
*/ public URL getFileUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with size 1KB.</p> * * {@codesnippet com.azure.storage.file.fileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return A response containing the file info and the status of creating the file. * @throws StorageErrorException If the file has already existed, the parent directory does not exist or fileName is * an invalid resource name. */ public Mono<Response<FileInfo>> create(long maxSize) { return create(maxSize, null, null); } /** * Creates a file in the storage account and returns a response of FileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers and metadata.</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.create * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders Additional parameters for the operation. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the * naming rules. * @return A response containing the directory info and the status of creating the directory. * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or * directory is an invalid resource name. 
* @see <a href="https: */ public Mono<Response<FileInfo>> create(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "now"; String fileLastWriteTime = "now"; return azureFileStorageClient.files().createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime, fileLastWriteTime, null, metadata, filePermission, null, httpHeaders, Context.NONE) .map(this::createFileInfoResponse); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the * naming rules. * @return A response containing the file copy info and the status of copying the file. * @see <a href="https: */ public Mono<Response<FileCopyInfo>> startCopy(String sourceUrl, Map<String, String> metadata) { return azureFileStorageClient.files().startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, Context.NONE) .map(this::startCopyResponse); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return A response containing the status of aborting copy the file. 
*/ public Mono<VoidResponse> abortCopy(String copyId) { return azureFileStorageClient.files().abortCopyWithRestResponseAsync(shareName, filePath, copyId, Context.NONE) .map(VoidResponse::new); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath) { return downloadToFile(downloadFilePath, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath, FileRange range) { AsynchronousFileChannel channel = channelSetup(downloadFilePath); return sliceFileRange(range) .flatMap(chunk -> downloadWithProperties(chunk, false) .map(dar -> dar.value().body()) .subscribeOn(Schedulers.elastic()) .flatMap(fbb -> FluxUtil.bytebufStreamToFile(fbb, channel, chunk.start() - (range == null ? 
0 : range.start())) .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))) .then() .doOnTerminate(() -> channelCleanUp(channel)); } private AsynchronousFileChannel channelSetup(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private void channelCleanUp(AsynchronousFileChannel channel) { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Flux<FileRange> sliceFileRange(FileRange fileRange) { long offset = fileRange == null ? 0L : fileRange.start(); Mono<Long> end; if (fileRange != null) { end = Mono.just(fileRange.end()); } else { end = Mono.empty(); } end = end.switchIfEmpty(getProperties().map(rb -> rb.value().contentLength())); return end .map(e -> { List<FileRange> chunks = new ArrayList<>(); for (long pos = offset; pos < e; pos += FILE_DEFAULT_BLOCK_SIZE) { long count = FILE_DEFAULT_BLOCK_SIZE; if (pos + count > e) { count = e - pos; } chunks.add(new FileRange(pos, pos + count - 1)); } return chunks; }) .flatMapMany(Flux::fromIterable); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. 
</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithProperties() { return downloadWithProperties(null, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to * true, as long as the range is less than or equal to 4 MB in size. * @return A response that only contains headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithProperties(FileRange range, Boolean rangeGetContentMD5) { String rangeString = range == null ? null : range.toString(); return azureFileStorageClient.files().downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, Context.NONE) .map(this::downloadWithPropertiesResponse); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. 
*/ public Mono<VoidResponse> delete() { return azureFileStorageClient.files().deleteWithRestResponseAsync(shareName, filePath, Context.NONE) .map(VoidResponse::new); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return Storage file properties */ public Mono<Response<FileProperties>> getProperties() { return azureFileStorageClient.files().getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, Context.NONE) .map(this::getPropertiesResponse); } /** * Sets the user-defined httpHeaders to associate to the file. * * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current * size of the file, then all ranges above the specified byte value are cleared. * @return Response of the information about the file * @throws IllegalArgumentException thrown if parameters fail the validation. 
*/ public Mono<Response<FileInfo>> setHttpHeaders(long newFileSize, FileHTTPHeaders httpHeaders) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "preserve"; String fileLastWriteTime = "preserve"; return azureFileStorageClient.files().setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime, fileLastWriteTime, null, newFileSize, filePermission, null, httpHeaders, Context.NONE) .map(this::setHttpHeadersResponse); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return information about the file * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<Response<FileMetadataInfo>> setMetadata(Map<String, String> metadata) { return azureFileStorageClient.files().setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, Context.NONE) .map(this::setMetadataResponse); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. 
* @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> upload(Flux<ByteBuf> data, long length) { FileRange range = new FileRange(0, length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, Context.NONE) .map(this::uploadResponse); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload date "default" starting from 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> upload(Flux<ByteBuf> data, long length, long offset) { FileRange range = * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. 
* @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to clear a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> clearRange(long length) { FileRange range = new FileRange(0, length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, 0, null, null, null, Context.NONE) .map(this::uploadResponse); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to clear a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> clearRange(long length, long offset) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, 0, null, null, null, Context.NONE) .map(this::uploadResponse); } /** * Uploads file to storage file service. * * <p><strong>Code Samples</strong></p> * * <p> Upload the file from the source file path. </p> * * (@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile
class FileAsyncClient { private final ClientLogger logger = new ClientLogger(FileAsyncClient.class); private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L; private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300; private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String filePath; private final String snapshot; /** * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param filePath Path to the file * @param snapshot The snapshot of the share */ FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Creates a FileAsyncClient that sends requests to the storage account at {@code endpoint}. Each service call goes * through the {@code httpPipeline}. * * @param endpoint URL for the Storage File service * @param httpPipeline HttpPipeline that HTTP requests and response flow through * @param shareName Name of the share * @param filePath Path to the file * @param snapshot Optional snapshot of the share */ FileAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * Get the url of the storage file client. 
* * @return the URL of the storage file client * @throws RuntimeException If the file is using a malformed URL. */ public URL getFileUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with size 1KB.</p> * * {@codesnippet com.azure.storage.file.fileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return A response containing the file info and the status of creating the file. * @throws StorageErrorException If the file has already existed, the parent directory does not exist or fileName is * an invalid resource name. */ public Mono<FileInfo> create(long maxSize) { return createWithResponse(maxSize, null, null).flatMap(FluxUtil::toMono); } /** * Creates a file in the storage account and returns a response of FileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers and metadata.</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders Additional parameters for the operation. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return A response containing the {@link FileInfo file info} and the status of creating the file. * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or directory is an invalid resource name. 
*/ public Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata) { return withContext(context -> createWithResponse(maxSize, httpHeaders, metadata, context)); } Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "now"; String fileLastWriteTime = "now"; return azureFileStorageClient.files().createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime, fileLastWriteTime, null, metadata, filePermission, null, httpHeaders, context) .map(this::createFileInfoResponse); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return The {@link FileCopyInfo file copy info}. * @see <a href="https: */ public Mono<FileCopyInfo> startCopy(String sourceUrl, Map<String, String> metadata) { return startCopyWithResponse(sourceUrl, metadata).flatMap(FluxUtil::toMono); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. 
Metadata names must adhere to the naming rules. * @return A response containing the {@link FileCopyInfo file copy info} and the status of copying the file. * @see <a href="https: */ public Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata) { return withContext(context -> startCopyWithResponse(sourceUrl, metadata, context)); } Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata, Context context) { return azureFileStorageClient.files().startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, context) .map(this::startCopyResponse); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return An empty response. */ public Mono<Void> abortCopy(String copyId) { return abortCopyWithResponse(copyId).flatMap(FluxUtil::toMono); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return A response containing the status of aborting copy the file. 
*/ public Mono<VoidResponse> abortCopyWithResponse(String copyId) { return withContext(context -> abortCopyWithResponse(copyId, context)); } Mono<VoidResponse> abortCopyWithResponse(String copyId, Context context) { return azureFileStorageClient.files().abortCopyWithRestResponseAsync(shareName, filePath, copyId, context) .map(VoidResponse::new); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath) { return downloadToFile(downloadFilePath, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath, FileRange range) { AsynchronousFileChannel channel = channelSetup(downloadFilePath); return sliceFileRange(range) .flatMap(chunk -> downloadWithPropertiesWithResponse(chunk, false) .map(dar -> dar.value().body()) .subscribeOn(Schedulers.elastic()) .flatMap(fbb -> FluxUtil.writeFile(fbb, channel, chunk.start() - (range == null ? 
0 : range.start())) .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))) .then() .doOnTerminate(() -> channelCleanUp(channel)); } private AsynchronousFileChannel channelSetup(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private void channelCleanUp(AsynchronousFileChannel channel) { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Flux<FileRange> sliceFileRange(FileRange fileRange) { long offset = fileRange == null ? 0L : fileRange.start(); Mono<Long> end; if (fileRange != null) { end = Mono.just(fileRange.end()); } else { end = Mono.empty(); } end = end.switchIfEmpty(getProperties().map(rb -> rb.contentLength())); return end .map(e -> { List<FileRange> chunks = new ArrayList<>(); for (long pos = offset; pos < e; pos += FILE_DEFAULT_BLOCK_SIZE) { long count = FILE_DEFAULT_BLOCK_SIZE; if (pos + count > e) { count = e - pos; } chunks.add(new FileRange(pos, pos + count - 1)); } return chunks; }) .flatMapMany(Flux::fromIterable); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. 
</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties} * * <p>For more information, see the * <a href="https: * * @return The {@link FileDownloadInfo file download Info} */ public Mono<FileDownloadInfo> downloadWithProperties() { return downloadWithPropertiesWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets * to true, as long as the range is less than or equal to 4 MB in size. * @return A response containing the {@link FileDownloadInfo file download Info} with headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5) { return withContext(context -> downloadWithPropertiesWithResponse(range, rangeGetContentMD5, context)); } Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5, Context context) { String rangeString = range == null ? null : range.toString(); return azureFileStorageClient.files().downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, context) .map(this::downloadWithPropertiesResponse); } /** * Deletes the file associate with the client. 
* * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} * * <p>For more information, see the * <a href="https: * @return An empty response * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<Void> delete() { return deleteWithResponse(null).flatMap(FluxUtil::toMono); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.deleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<VoidResponse> deleteWithResponse() { return withContext(context -> deleteWithResponse(context)); } Mono<VoidResponse> deleteWithResponse(Context context) { return azureFileStorageClient.files().deleteWithRestResponseAsync(shareName, filePath, context) .map(VoidResponse::new); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return {@link FileProperties Storage file properties} */ public Mono<FileProperties> getProperties() { return getPropertiesWithResponse().flatMap(FluxUtil::toMono); } /** * Retrieves the properties of the storage account's file. * The properties includes file metadata, last modified date, is server encrypted, and eTag. 
* * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getPropertiesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response containing the {@link FileProperties storage file properties} and response status code */ public Mono<Response<FileProperties>> getPropertiesWithResponse() { return withContext(context -> getPropertiesWithResponse(context)); } Mono<Response<FileProperties>> getPropertiesWithResponse(Context context) { return azureFileStorageClient.files().getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, context) .map(this::getPropertiesResponse); } /** * Sets the user-defined httpHeaders to associate to the file. * * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return The {@link FileInfo file info} * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<FileInfo> setHttpHeaders(long newFileSize, FileHTTPHeaders httpHeaders) { return setHttpHeadersWithResponse(newFileSize, httpHeaders).flatMap(FluxUtil::toMono); } /** * Sets the user-defined httpHeaders to associate to the file. 
* * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return Response containing the {@link FileInfo file info} and response status code * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders) { return withContext(context -> setHttpHeadersWithResponse(newFileSize, httpHeaders, context)); } Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "preserve"; String fileLastWriteTime = "preserve"; return azureFileStorageClient.files().setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime, fileLastWriteTime, null, newFileSize, filePermission, null, httpHeaders, context) .map(this::setHttpHeadersResponse); } /** * Sets the user-defined metadata to associate to the file. 
* * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return {@link FileMetadataInfo file meta info} * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<FileMetadataInfo> setMetadata(Map<String, String> metadata) { return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return A response containing the {@link FileMetadataInfo file meta info} and status code * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata) { return withContext(context -> setMetadataWithResponse(metadata, context)); } Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata, Context context) { return 
azureFileStorageClient.files().setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, context) .map(this::setMetadataResponse); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length) { return uploadWithResponse(data, length).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload "default" to the file. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero.. 
* @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length) { return withContext(context -> uploadWithResponse(data, length, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, Context context) { FileRange range = new FileRange(0, length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, context) .map(this::uploadResponse); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" starting from 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return The {@link FileUploadInfo file upload info} * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length, long offset) { return uploadWithResponse(data, length, offset).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place write on the specified file. 
* * <p><strong>Code Samples</strong></p> * * <p>Upload the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code null} * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero. * @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset) { return withContext(context -> uploadWithResponse(data, length, offset, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset, Context context) { FileRange range = * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared. * @return The {@link FileUploadInfo file upload info} */ public Mono<FileUploadInfo> clearRange(long length) { return clearRangeWithResponse(length, 0).flatMap(FluxUtil::toMono); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. 
* @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return A response of {@link FileUploadInfo file upload info} that only contains headers and response status code */ public Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset) { return withContext(context -> clearRangeWithResponse(length, offset, context)); } Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset, Context context) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR, 0L, null, null, null, context) .map(this::uploadResponse); } /** * Uploads file to storage file service. * * <p><strong>Code Samples</strong></p> * * <p> Upload the file from the source file path. </p> * * (@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile
Done.
public Mono<Response<FileUploadInfo>> clearRange(long length) { FileRange range = new FileRange(0, length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, 0, null, null, null, Context.NONE) .map(this::uploadResponse); }
return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, 0, null, null, null, Context.NONE)
new FileRange(0, length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, context) .map(this::uploadResponse); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" starting from 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload
class FileAsyncClient { private final ClientLogger logger = new ClientLogger(FileAsyncClient.class); private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L; private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300; private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String filePath; private final String snapshot; /** * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param filePath Path to the file * @param snapshot The snapshot of the share */ FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) { this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Creates a FileAsyncClient that sends requests to the storage account at {@code endpoint}. Each service call goes * through the {@code httpPipeline}. * * @param endpoint URL for the Storage File service * @param httpPipeline HttpPipeline that HTTP requests and response flow through * @param shareName Name of the share * @param filePath Path to the file * @param snapshot Optional snapshot of the share */ FileAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String filePath, String snapshot) { this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * Get the url of the storage file client. * * @return the URL of the storage file client * @throws RuntimeException If the file is using a malformed URL. 
*/ public URL getFileUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with size 1KB.</p> * * {@codesnippet com.azure.storage.file.fileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return A response containing the file info and the status of creating the file. * @throws StorageErrorException If the file has already existed, the parent directory does not exist or fileName is * an invalid resource name. */ public Mono<Response<FileInfo>> create(long maxSize) { return create(maxSize, null, null); } /** * Creates a file in the storage account and returns a response of FileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers and metadata.</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.create * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders Additional parameters for the operation. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the * naming rules. * @return A response containing the directory info and the status of creating the directory. * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or * directory is an invalid resource name. 
* @see <a href="https: */ public Mono<Response<FileInfo>> create(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "now"; String fileLastWriteTime = "now"; return azureFileStorageClient.files().createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime, fileLastWriteTime, null, metadata, filePermission, null, httpHeaders, Context.NONE) .map(this::createFileInfoResponse); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the * naming rules. * @return A response containing the file copy info and the status of copying the file. * @see <a href="https: */ public Mono<Response<FileCopyInfo>> startCopy(String sourceUrl, Map<String, String> metadata) { return azureFileStorageClient.files().startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, Context.NONE) .map(this::startCopyResponse); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return A response containing the status of aborting copy the file. 
*/ public Mono<VoidResponse> abortCopy(String copyId) { return azureFileStorageClient.files().abortCopyWithRestResponseAsync(shareName, filePath, copyId, Context.NONE) .map(VoidResponse::new); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath) { return downloadToFile(downloadFilePath, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath, FileRange range) { AsynchronousFileChannel channel = channelSetup(downloadFilePath); return sliceFileRange(range) .flatMap(chunk -> downloadWithProperties(chunk, false) .map(dar -> dar.value().body()) .subscribeOn(Schedulers.elastic()) .flatMap(fbb -> FluxUtil.bytebufStreamToFile(fbb, channel, chunk.start() - (range == null ? 
0 : range.start())) .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))) .then() .doOnTerminate(() -> channelCleanUp(channel)); } private AsynchronousFileChannel channelSetup(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private void channelCleanUp(AsynchronousFileChannel channel) { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Flux<FileRange> sliceFileRange(FileRange fileRange) { long offset = fileRange == null ? 0L : fileRange.start(); Mono<Long> end; if (fileRange != null) { end = Mono.just(fileRange.end()); } else { end = Mono.empty(); } end = end.switchIfEmpty(getProperties().map(rb -> rb.value().contentLength())); return end .map(e -> { List<FileRange> chunks = new ArrayList<>(); for (long pos = offset; pos < e; pos += FILE_DEFAULT_BLOCK_SIZE) { long count = FILE_DEFAULT_BLOCK_SIZE; if (pos + count > e) { count = e - pos; } chunks.add(new FileRange(pos, pos + count - 1)); } return chunks; }) .flatMapMany(Flux::fromIterable); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. 
</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithProperties() { return downloadWithProperties(null, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to * true, as long as the range is less than or equal to 4 MB in size. * @return A response that only contains headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithProperties(FileRange range, Boolean rangeGetContentMD5) { String rangeString = range == null ? null : range.toString(); return azureFileStorageClient.files().downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, Context.NONE) .map(this::downloadWithPropertiesResponse); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. 
*/ public Mono<VoidResponse> delete() { return azureFileStorageClient.files().deleteWithRestResponseAsync(shareName, filePath, Context.NONE) .map(VoidResponse::new); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return Storage file properties */ public Mono<Response<FileProperties>> getProperties() { return azureFileStorageClient.files().getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, Context.NONE) .map(this::getPropertiesResponse); } /** * Sets the user-defined httpHeaders to associate to the file. * * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current * size of the file, then all ranges above the specified byte value are cleared. * @return Response of the information about the file * @throws IllegalArgumentException thrown if parameters fail the validation. 
*/ public Mono<Response<FileInfo>> setHttpHeaders(long newFileSize, FileHTTPHeaders httpHeaders) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "preserve"; String fileLastWriteTime = "preserve"; return azureFileStorageClient.files().setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime, fileLastWriteTime, null, newFileSize, filePermission, null, httpHeaders, Context.NONE) .map(this::setHttpHeadersResponse); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return information about the file * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<Response<FileMetadataInfo>> setMetadata(Map<String, String> metadata) { return azureFileStorageClient.files().setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, Context.NONE) .map(this::setMetadataResponse); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. 
* @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> upload(Flux<ByteBuf> data, long length) { FileRange range = * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> upload(Flux<ByteBuf> data, long length, long offset) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, Context.NONE) .map(this::uploadResponse); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range from 0 bytes with length of 1024. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. 
* @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to clear a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> clearRange(long length) { FileRange range = new FileRange(0, length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, 0, null, null, null, Context.NONE) .map(this::uploadResponse); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to clear a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> clearRange(long length, long offset) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, 0, null, null, null, Context.NONE) .map(this::uploadResponse); } /** * Uploads file to storage file service. * * <p><strong>Code Samples</strong></p> * * <p> Upload the file from the source file path. </p> * * (@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile
class FileAsyncClient { private final ClientLogger logger = new ClientLogger(FileAsyncClient.class); private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L; private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300; private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String filePath; private final String snapshot; /** * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param filePath Path to the file * @param snapshot The snapshot of the share */ FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Creates a FileAsyncClient that sends requests to the storage account at {@code endpoint}. Each service call goes * through the {@code httpPipeline}. * * @param endpoint URL for the Storage File service * @param httpPipeline HttpPipeline that HTTP requests and response flow through * @param shareName Name of the share * @param filePath Path to the file * @param snapshot Optional snapshot of the share */ FileAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * Get the url of the storage file client. 
* * @return the URL of the storage file client * @throws RuntimeException If the file is using a malformed URL. */ public URL getFileUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with size 1KB.</p> * * {@codesnippet com.azure.storage.file.fileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return A response containing the file info and the status of creating the file. * @throws StorageErrorException If the file has already existed, the parent directory does not exist or fileName is * an invalid resource name. */ public Mono<FileInfo> create(long maxSize) { return createWithResponse(maxSize, null, null).flatMap(FluxUtil::toMono); } /** * Creates a file in the storage account and returns a response of FileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers and metadata.</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders Additional parameters for the operation. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return A response containing the {@link FileInfo file info} and the status of creating the file. * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or directory is an invalid resource name. 
*/ public Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata) { return withContext(context -> createWithResponse(maxSize, httpHeaders, metadata, context)); } Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "now"; String fileLastWriteTime = "now"; return azureFileStorageClient.files().createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime, fileLastWriteTime, null, metadata, filePermission, null, httpHeaders, context) .map(this::createFileInfoResponse); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return The {@link FileCopyInfo file copy info}. * @see <a href="https: */ public Mono<FileCopyInfo> startCopy(String sourceUrl, Map<String, String> metadata) { return startCopyWithResponse(sourceUrl, metadata).flatMap(FluxUtil::toMono); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. 
Metadata names must adhere to the naming rules. * @return A response containing the {@link FileCopyInfo file copy info} and the status of copying the file. * @see <a href="https: */ public Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata) { return withContext(context -> startCopyWithResponse(sourceUrl, metadata, context)); } Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata, Context context) { return azureFileStorageClient.files().startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, context) .map(this::startCopyResponse); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return An empty response. */ public Mono<Void> abortCopy(String copyId) { return abortCopyWithResponse(copyId).flatMap(FluxUtil::toMono); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return A response containing the status of aborting copy the file. 
*/ public Mono<VoidResponse> abortCopyWithResponse(String copyId) { return withContext(context -> abortCopyWithResponse(copyId, context)); } Mono<VoidResponse> abortCopyWithResponse(String copyId, Context context) { return azureFileStorageClient.files().abortCopyWithRestResponseAsync(shareName, filePath, copyId, context) .map(VoidResponse::new); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath) { return downloadToFile(downloadFilePath, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath, FileRange range) { AsynchronousFileChannel channel = channelSetup(downloadFilePath); return sliceFileRange(range) .flatMap(chunk -> downloadWithPropertiesWithResponse(chunk, false) .map(dar -> dar.value().body()) .subscribeOn(Schedulers.elastic()) .flatMap(fbb -> FluxUtil.writeFile(fbb, channel, chunk.start() - (range == null ? 
0 : range.start())) .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))) .then() .doOnTerminate(() -> channelCleanUp(channel)); } private AsynchronousFileChannel channelSetup(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private void channelCleanUp(AsynchronousFileChannel channel) { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Flux<FileRange> sliceFileRange(FileRange fileRange) { long offset = fileRange == null ? 0L : fileRange.start(); Mono<Long> end; if (fileRange != null) { end = Mono.just(fileRange.end()); } else { end = Mono.empty(); } end = end.switchIfEmpty(getProperties().map(rb -> rb.contentLength())); return end .map(e -> { List<FileRange> chunks = new ArrayList<>(); for (long pos = offset; pos < e; pos += FILE_DEFAULT_BLOCK_SIZE) { long count = FILE_DEFAULT_BLOCK_SIZE; if (pos + count > e) { count = e - pos; } chunks.add(new FileRange(pos, pos + count - 1)); } return chunks; }) .flatMapMany(Flux::fromIterable); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. 
</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties} * * <p>For more information, see the * <a href="https: * * @return The {@link FileDownloadInfo file download Info} */ public Mono<FileDownloadInfo> downloadWithProperties() { return downloadWithPropertiesWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets * to true, as long as the range is less than or equal to 4 MB in size. * @return A response containing the {@link FileDownloadInfo file download Info} with headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5) { return withContext(context -> downloadWithPropertiesWithResponse(range, rangeGetContentMD5, context)); } Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5, Context context) { String rangeString = range == null ? null : range.toString(); return azureFileStorageClient.files().downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, context) .map(this::downloadWithPropertiesResponse); } /** * Deletes the file associate with the client. 
* * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} * * <p>For more information, see the * <a href="https: * @return An empty response * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<Void> delete() { return deleteWithResponse(null).flatMap(FluxUtil::toMono); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.deleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<VoidResponse> deleteWithResponse() { return withContext(context -> deleteWithResponse(context)); } Mono<VoidResponse> deleteWithResponse(Context context) { return azureFileStorageClient.files().deleteWithRestResponseAsync(shareName, filePath, context) .map(VoidResponse::new); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return {@link FileProperties Storage file properties} */ public Mono<FileProperties> getProperties() { return getPropertiesWithResponse().flatMap(FluxUtil::toMono); } /** * Retrieves the properties of the storage account's file. * The properties includes file metadata, last modified date, is server encrypted, and eTag. 
* * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getPropertiesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response containing the {@link FileProperties storage file properties} and response status code */ public Mono<Response<FileProperties>> getPropertiesWithResponse() { return withContext(context -> getPropertiesWithResponse(context)); } Mono<Response<FileProperties>> getPropertiesWithResponse(Context context) { return azureFileStorageClient.files().getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, context) .map(this::getPropertiesResponse); } /** * Sets the user-defined httpHeaders to associate to the file. * * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return The {@link FileInfo file info} * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<FileInfo> setHttpHeaders(long newFileSize, FileHTTPHeaders httpHeaders) { return setHttpHeadersWithResponse(newFileSize, httpHeaders).flatMap(FluxUtil::toMono); } /** * Sets the user-defined httpHeaders to associate to the file. 
* * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return Response containing the {@link FileInfo file info} and response status code * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders) { return withContext(context -> setHttpHeadersWithResponse(newFileSize, httpHeaders, context)); } Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "preserve"; String fileLastWriteTime = "preserve"; return azureFileStorageClient.files().setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime, fileLastWriteTime, null, newFileSize, filePermission, null, httpHeaders, context) .map(this::setHttpHeadersResponse); } /** * Sets the user-defined metadata to associate to the file. 
* * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return {@link FileMetadataInfo file meta info} * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<FileMetadataInfo> setMetadata(Map<String, String> metadata) { return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return A response containing the {@link FileMetadataInfo file meta info} and status code * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata) { return withContext(context -> setMetadataWithResponse(metadata, context)); } Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata, Context context) { return 
azureFileStorageClient.files().setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, context) .map(this::setMetadataResponse); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length) { return uploadWithResponse(data, length).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload "default" to the file. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero.. 
* @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length) { return withContext(context -> uploadWithResponse(data, length, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, Context context) { FileRange range = * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return The {@link FileUploadInfo file upload info} * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length, long offset) { return uploadWithResponse(data, length, offset).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code null} * @param length Specifies the number of bytes being transmitted in the request body. 
When the FileRangeWriteType is set to clear, the value of this header must be set to zero. * @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset) { return withContext(context -> uploadWithResponse(data, length, offset, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset, Context context) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, context) .map(this::uploadResponse); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clears the first 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared. * @return The {@link FileUploadInfo file upload info} */ public Mono<FileUploadInfo> clearRange(long length) { return clearRangeWithResponse(length, 0).flatMap(FluxUtil::toMono); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. 
* @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return A response of {@link FileUploadInfo file upload info} that only contains headers and response status code */ public Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset) { return withContext(context -> clearRangeWithResponse(length, offset, context)); } Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset, Context context) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR, 0L, null, null, null, context) .map(this::uploadResponse); } /** * Uploads file to storage file service. * * <p><strong>Code Samples</strong></p> * * <p> Upload the file from the source file path. </p> * * (@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile
Done.
public Mono<Response<FileUploadInfo>> clearRange(long length, long offset) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, 0, null, null, null, Context.NONE) .map(this::uploadResponse); }
return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, 0, null, null, null, Context.NONE)
new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, context) .map(this::uploadResponse); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clears the first 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange
class FileAsyncClient { private final ClientLogger logger = new ClientLogger(FileAsyncClient.class); private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L; private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300; private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String filePath; private final String snapshot; /** * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param filePath Path to the file * @param snapshot The snapshot of the share */ FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) { this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Creates a FileAsyncClient that sends requests to the storage account at {@code endpoint}. Each service call goes * through the {@code httpPipeline}. * * @param endpoint URL for the Storage File service * @param httpPipeline HttpPipeline that HTTP requests and response flow through * @param shareName Name of the share * @param filePath Path to the file * @param snapshot Optional snapshot of the share */ FileAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String filePath, String snapshot) { this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * Get the url of the storage file client. * * @return the URL of the storage file client * @throws RuntimeException If the file is using a malformed URL. 
*/ public URL getFileUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with size 1KB.</p> * * {@codesnippet com.azure.storage.file.fileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return A response containing the file info and the status of creating the file. * @throws StorageErrorException If the file has already existed, the parent directory does not exist or fileName is * an invalid resource name. */ public Mono<Response<FileInfo>> create(long maxSize) { return create(maxSize, null, null); } /** * Creates a file in the storage account and returns a response of FileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers and metadata.</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.create * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders Additional parameters for the operation. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the * naming rules. * @return A response containing the directory info and the status of creating the directory. * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or * directory is an invalid resource name. 
* @see <a href="https: */ public Mono<Response<FileInfo>> create(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "now"; String fileLastWriteTime = "now"; return azureFileStorageClient.files().createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime, fileLastWriteTime, null, metadata, filePermission, null, httpHeaders, Context.NONE) .map(this::createFileInfoResponse); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the * naming rules. * @return A response containing the file copy info and the status of copying the file. * @see <a href="https: */ public Mono<Response<FileCopyInfo>> startCopy(String sourceUrl, Map<String, String> metadata) { return azureFileStorageClient.files().startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, Context.NONE) .map(this::startCopyResponse); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return A response containing the status of aborting copy the file. 
*/ public Mono<VoidResponse> abortCopy(String copyId) { return azureFileStorageClient.files().abortCopyWithRestResponseAsync(shareName, filePath, copyId, Context.NONE) .map(VoidResponse::new); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath) { return downloadToFile(downloadFilePath, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath, FileRange range) { AsynchronousFileChannel channel = channelSetup(downloadFilePath); return sliceFileRange(range) .flatMap(chunk -> downloadWithProperties(chunk, false) .map(dar -> dar.value().body()) .subscribeOn(Schedulers.elastic()) .flatMap(fbb -> FluxUtil.bytebufStreamToFile(fbb, channel, chunk.start() - (range == null ? 
0 : range.start())) .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))) .then() .doOnTerminate(() -> channelCleanUp(channel)); } private AsynchronousFileChannel channelSetup(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private void channelCleanUp(AsynchronousFileChannel channel) { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Flux<FileRange> sliceFileRange(FileRange fileRange) { long offset = fileRange == null ? 0L : fileRange.start(); Mono<Long> end; if (fileRange != null) { end = Mono.just(fileRange.end()); } else { end = Mono.empty(); } end = end.switchIfEmpty(getProperties().map(rb -> rb.value().contentLength())); return end .map(e -> { List<FileRange> chunks = new ArrayList<>(); for (long pos = offset; pos < e; pos += FILE_DEFAULT_BLOCK_SIZE) { long count = FILE_DEFAULT_BLOCK_SIZE; if (pos + count > e) { count = e - pos; } chunks.add(new FileRange(pos, pos + count - 1)); } return chunks; }) .flatMapMany(Flux::fromIterable); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. 
</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithProperties() { return downloadWithProperties(null, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to * true, as long as the range is less than or equal to 4 MB in size. * @return A response that only contains headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithProperties(FileRange range, Boolean rangeGetContentMD5) { String rangeString = range == null ? null : range.toString(); return azureFileStorageClient.files().downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, Context.NONE) .map(this::downloadWithPropertiesResponse); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. 
*/ public Mono<VoidResponse> delete() { return azureFileStorageClient.files().deleteWithRestResponseAsync(shareName, filePath, Context.NONE) .map(VoidResponse::new); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return Storage file properties */ public Mono<Response<FileProperties>> getProperties() { return azureFileStorageClient.files().getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, Context.NONE) .map(this::getPropertiesResponse); } /** * Sets the user-defined httpHeaders to associate to the file. * * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current * size of the file, then all ranges above the specified byte value are cleared. * @return Response of the information about the file * @throws IllegalArgumentException thrown if parameters fail the validation. 
*/ public Mono<Response<FileInfo>> setHttpHeaders(long newFileSize, FileHTTPHeaders httpHeaders) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "preserve"; String fileLastWriteTime = "preserve"; return azureFileStorageClient.files().setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime, fileLastWriteTime, null, newFileSize, filePermission, null, httpHeaders, Context.NONE) .map(this::setHttpHeadersResponse); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return information about the file * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<Response<FileMetadataInfo>> setMetadata(Map<String, String> metadata) { return azureFileStorageClient.files().setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, Context.NONE) .map(this::setMetadataResponse); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. 
* @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> upload(Flux<ByteBuf> data, long length) { FileRange range = new FileRange(0, length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, Context.NONE) .map(this::uploadResponse); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload date "default" starting from 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> upload(Flux<ByteBuf> data, long length, long offset) { FileRange range = * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. 
* @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to clear a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> clearRange(long length) { FileRange range = new FileRange(0, length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, 0, null, null, null, Context.NONE) .map(this::uploadResponse); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return A response that only contains headers and response status code * @throws StorageErrorException If you attempt to clear a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> clearRange(long length, long offset) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, 0, null, null, null, Context.NONE) .map(this::uploadResponse); } /** * Uploads file to storage file service. * * <p><strong>Code Samples</strong></p> * * <p> Upload the file from the source file path. </p> * * (@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile
class FileAsyncClient { private final ClientLogger logger = new ClientLogger(FileAsyncClient.class); private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L; private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300; private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String filePath; private final String snapshot; /** * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param filePath Path to the file * @param snapshot The snapshot of the share */ FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Creates a FileAsyncClient that sends requests to the storage account at {@code endpoint}. Each service call goes * through the {@code httpPipeline}. * * @param endpoint URL for the Storage File service * @param httpPipeline HttpPipeline that HTTP requests and response flow through * @param shareName Name of the share * @param filePath Path to the file * @param snapshot Optional snapshot of the share */ FileAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * Get the url of the storage file client. 
* * @return the URL of the storage file client * @throws RuntimeException If the file is using a malformed URL. */ public URL getFileUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with size 1KB.</p> * * {@codesnippet com.azure.storage.file.fileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return A response containing the file info and the status of creating the file. * @throws StorageErrorException If the file has already existed, the parent directory does not exist or fileName is * an invalid resource name. */ public Mono<FileInfo> create(long maxSize) { return createWithResponse(maxSize, null, null).flatMap(FluxUtil::toMono); } /** * Creates a file in the storage account and returns a response of FileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers and metadata.</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders Additional parameters for the operation. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return A response containing the {@link FileInfo file info} and the status of creating the file. * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or directory is an invalid resource name. 
*/ public Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata) { return withContext(context -> createWithResponse(maxSize, httpHeaders, metadata, context)); } Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "now"; String fileLastWriteTime = "now"; return azureFileStorageClient.files().createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime, fileLastWriteTime, null, metadata, filePermission, null, httpHeaders, context) .map(this::createFileInfoResponse); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return The {@link FileCopyInfo file copy info}. * @see <a href="https: */ public Mono<FileCopyInfo> startCopy(String sourceUrl, Map<String, String> metadata) { return startCopyWithResponse(sourceUrl, metadata).flatMap(FluxUtil::toMono); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. 
Metadata names must adhere to the naming rules. * @return A response containing the {@link FileCopyInfo file copy info} and the status of copying the file. * @see <a href="https: */ public Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata) { return withContext(context -> startCopyWithResponse(sourceUrl, metadata, context)); } Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata, Context context) { return azureFileStorageClient.files().startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, context) .map(this::startCopyResponse); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return An empty response. */ public Mono<Void> abortCopy(String copyId) { return abortCopyWithResponse(copyId).flatMap(FluxUtil::toMono); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return A response containing the status of aborting copy the file. 
*/ public Mono<VoidResponse> abortCopyWithResponse(String copyId) { return withContext(context -> abortCopyWithResponse(copyId, context)); } Mono<VoidResponse> abortCopyWithResponse(String copyId, Context context) { return azureFileStorageClient.files().abortCopyWithRestResponseAsync(shareName, filePath, copyId, context) .map(VoidResponse::new); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath) { return downloadToFile(downloadFilePath, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath, FileRange range) { AsynchronousFileChannel channel = channelSetup(downloadFilePath); return sliceFileRange(range) .flatMap(chunk -> downloadWithPropertiesWithResponse(chunk, false) .map(dar -> dar.value().body()) .subscribeOn(Schedulers.elastic()) .flatMap(fbb -> FluxUtil.writeFile(fbb, channel, chunk.start() - (range == null ? 
0 : range.start())) .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))) .then() .doOnTerminate(() -> channelCleanUp(channel)); } private AsynchronousFileChannel channelSetup(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private void channelCleanUp(AsynchronousFileChannel channel) { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Flux<FileRange> sliceFileRange(FileRange fileRange) { long offset = fileRange == null ? 0L : fileRange.start(); Mono<Long> end; if (fileRange != null) { end = Mono.just(fileRange.end()); } else { end = Mono.empty(); } end = end.switchIfEmpty(getProperties().map(rb -> rb.contentLength())); return end .map(e -> { List<FileRange> chunks = new ArrayList<>(); for (long pos = offset; pos < e; pos += FILE_DEFAULT_BLOCK_SIZE) { long count = FILE_DEFAULT_BLOCK_SIZE; if (pos + count > e) { count = e - pos; } chunks.add(new FileRange(pos, pos + count - 1)); } return chunks; }) .flatMapMany(Flux::fromIterable); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. 
</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties} * * <p>For more information, see the * <a href="https: * * @return The {@link FileDownloadInfo file download Info} */ public Mono<FileDownloadInfo> downloadWithProperties() { return downloadWithPropertiesWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets * to true, as long as the range is less than or equal to 4 MB in size. * @return A response containing the {@link FileDownloadInfo file download Info} with headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5) { return withContext(context -> downloadWithPropertiesWithResponse(range, rangeGetContentMD5, context)); } Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5, Context context) { String rangeString = range == null ? null : range.toString(); return azureFileStorageClient.files().downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, context) .map(this::downloadWithPropertiesResponse); } /** * Deletes the file associate with the client. 
* * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} * * <p>For more information, see the * <a href="https: * @return An empty response * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<Void> delete() { return deleteWithResponse(null).flatMap(FluxUtil::toMono); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.deleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<VoidResponse> deleteWithResponse() { return withContext(context -> deleteWithResponse(context)); } Mono<VoidResponse> deleteWithResponse(Context context) { return azureFileStorageClient.files().deleteWithRestResponseAsync(shareName, filePath, context) .map(VoidResponse::new); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return {@link FileProperties Storage file properties} */ public Mono<FileProperties> getProperties() { return getPropertiesWithResponse().flatMap(FluxUtil::toMono); } /** * Retrieves the properties of the storage account's file. * The properties includes file metadata, last modified date, is server encrypted, and eTag. 
* * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getPropertiesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response containing the {@link FileProperties storage file properties} and response status code */ public Mono<Response<FileProperties>> getPropertiesWithResponse() { return withContext(context -> getPropertiesWithResponse(context)); } Mono<Response<FileProperties>> getPropertiesWithResponse(Context context) { return azureFileStorageClient.files().getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, context) .map(this::getPropertiesResponse); } /** * Sets the user-defined httpHeaders to associate to the file. * * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return The {@link FileInfo file info} * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<FileInfo> setHttpHeaders(long newFileSize, FileHTTPHeaders httpHeaders) { return setHttpHeadersWithResponse(newFileSize, httpHeaders).flatMap(FluxUtil::toMono); } /** * Sets the user-defined httpHeaders to associate to the file. 
* * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return Response containing the {@link FileInfo file info} and response status code * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders) { return withContext(context -> setHttpHeadersWithResponse(newFileSize, httpHeaders, context)); } Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "preserve"; String fileLastWriteTime = "preserve"; return azureFileStorageClient.files().setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime, fileLastWriteTime, null, newFileSize, filePermission, null, httpHeaders, context) .map(this::setHttpHeadersResponse); } /** * Sets the user-defined metadata to associate to the file. 
* * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return {@link FileMetadataInfo file meta info} * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<FileMetadataInfo> setMetadata(Map<String, String> metadata) { return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return A response containing the {@link FileMetadataInfo file meta info} and status code * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata) { return withContext(context -> setMetadataWithResponse(metadata, context)); } Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata, Context context) { return 
azureFileStorageClient.files().setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, context) .map(this::setMetadataResponse); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length) { return uploadWithResponse(data, length).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload "default" to the file. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero.. 
* @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length) { return withContext(context -> uploadWithResponse(data, length, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, Context context) { FileRange range = new FileRange(0, length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, context) .map(this::uploadResponse); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" starting from 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return The {@link FileUploadInfo file upload info} * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length, long offset) { return uploadWithResponse(data, length, offset).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place write on the specified file. 
* * <p><strong>Code Samples</strong></p> * * <p>Upload the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code null} * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero. * @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset) { return withContext(context -> uploadWithResponse(data, length, offset, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset, Context context) { FileRange range = * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared. * @return The {@link FileUploadInfo file upload info} */ public Mono<FileUploadInfo> clearRange(long length) { return clearRangeWithResponse(length, 0).flatMap(FluxUtil::toMono); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. 
* @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return A response of {@link FileUploadInfo file upload info} that only contains headers and response status code */ public Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset) { return withContext(context -> clearRangeWithResponse(length, offset, context)); } Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset, Context context) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR, 0L, null, null, null, context) .map(this::uploadResponse); } /** * Uploads file to storage file service. * * <p><strong>Code Samples</strong></p> * * <p> Upload the file from the source file path. </p> * * (@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile
The `context` parameter is ignored here: the service call passes `Context.NONE` instead of the caller-supplied `context`. Pass `context` down to `uploadRangeWithRestResponseAsync` so per-call pipeline data (e.g. tracing) is propagated.
Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset, Context context) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR, 0L, null, null, null, Context.NONE) .map(this::uploadResponse); }
return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR, 0L, null, null, null, Context.NONE)
new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, context) .map(this::uploadResponse); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clears the first 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange
class FileAsyncClient { private final ClientLogger logger = new ClientLogger(FileAsyncClient.class); private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L; private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300; private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String filePath; private final String snapshot; /** * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param filePath Path to the file * @param snapshot The snapshot of the share */ FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Creates a FileAsyncClient that sends requests to the storage account at {@code endpoint}. Each service call goes * through the {@code httpPipeline}. * * @param endpoint URL for the Storage File service * @param httpPipeline HttpPipeline that HTTP requests and response flow through * @param shareName Name of the share * @param filePath Path to the file * @param snapshot Optional snapshot of the share */ FileAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * Get the url of the storage file client. 
* * @return the URL of the storage file client * @throws RuntimeException If the file is using a malformed URL. */ public URL getFileUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with size 1KB.</p> * * {@codesnippet com.azure.storage.file.fileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return A response containing the file info and the status of creating the file. * @throws StorageErrorException If the file has already existed, the parent directory does not exist or fileName is * an invalid resource name. */ public Mono<FileInfo> create(long maxSize) { return createWithResponse(maxSize, null, null).flatMap(FluxUtil::toMono); } /** * Creates a file in the storage account and returns a response of FileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers and metadata.</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders Additional parameters for the operation. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return A response containing the {@link FileInfo file info} and the status of creating the file. * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or directory is an invalid resource name. 
*/ public Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata) { return withContext(context -> createWithResponse(maxSize, httpHeaders, metadata, context)); } Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "now"; String fileLastWriteTime = "now"; return azureFileStorageClient.files().createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime, fileLastWriteTime, null, metadata, filePermission, null, httpHeaders, context) .map(this::createFileInfoResponse); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return The {@link FileCopyInfo file copy info}. * @see <a href="https: */ public Mono<FileCopyInfo> startCopy(String sourceUrl, Map<String, String> metadata) { return startCopyWithResponse(sourceUrl, metadata).flatMap(FluxUtil::toMono); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. 
Metadata names must adhere to the naming rules. * @return A response containing the {@link FileCopyInfo file copy info} and the status of copying the file. * @see <a href="https: */ public Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata) { return withContext(context -> startCopyWithResponse(sourceUrl, metadata, context)); } Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata, Context context) { return azureFileStorageClient.files().startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, context) .map(this::startCopyResponse); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return An empty response. */ public Mono<Void> abortCopy(String copyId) { return abortCopyWithResponse(copyId).flatMap(FluxUtil::toMono); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return A response containing the status of aborting copy the file. 
*/ public Mono<VoidResponse> abortCopyWithResponse(String copyId) { return withContext(context -> abortCopyWithResponse(copyId, context)); } Mono<VoidResponse> abortCopyWithResponse(String copyId, Context context) { return azureFileStorageClient.files().abortCopyWithRestResponseAsync(shareName, filePath, copyId, context) .map(VoidResponse::new); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath) { return downloadToFile(downloadFilePath, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath, FileRange range) { AsynchronousFileChannel channel = channelSetup(downloadFilePath); return sliceFileRange(range) .flatMap(chunk -> downloadWithPropertiesWithResponse(chunk, false) .map(dar -> dar.value().body()) .subscribeOn(Schedulers.elastic()) .flatMap(fbb -> FluxUtil.writeFile(fbb, channel, chunk.start() - (range == null ? 
0 : range.start())) .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))) .then() .doOnTerminate(() -> channelCleanUp(channel)); } private AsynchronousFileChannel channelSetup(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private void channelCleanUp(AsynchronousFileChannel channel) { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Flux<FileRange> sliceFileRange(FileRange fileRange) { long offset = fileRange == null ? 0L : fileRange.start(); Mono<Long> end; if (fileRange != null) { end = Mono.just(fileRange.end()); } else { end = Mono.empty(); } end = end.switchIfEmpty(getProperties().map(rb -> rb.contentLength())); return end .map(e -> { List<FileRange> chunks = new ArrayList<>(); for (long pos = offset; pos < e; pos += FILE_DEFAULT_BLOCK_SIZE) { long count = FILE_DEFAULT_BLOCK_SIZE; if (pos + count > e) { count = e - pos; } chunks.add(new FileRange(pos, pos + count - 1)); } return chunks; }) .flatMapMany(Flux::fromIterable); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. 
</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties} * * <p>For more information, see the * <a href="https: * * @return The {@link FileDownloadInfo file download Info} */ public Mono<FileDownloadInfo> downloadWithProperties() { return downloadWithPropertiesWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets * to true, as long as the range is less than or equal to 4 MB in size. * @return A response containing the {@link FileDownloadInfo file download Info} with headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5) { return withContext(context -> downloadWithPropertiesWithResponse(range, rangeGetContentMD5, context)); } Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5, Context context) { String rangeString = range == null ? null : range.toString(); return azureFileStorageClient.files().downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, context) .map(this::downloadWithPropertiesResponse); } /** * Deletes the file associate with the client. 
* * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} * * <p>For more information, see the * <a href="https: * @return An empty response * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<Void> delete() { return deleteWithResponse(null).flatMap(FluxUtil::toMono); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.deleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<VoidResponse> deleteWithResponse() { return withContext(context -> deleteWithResponse(context)); } Mono<VoidResponse> deleteWithResponse(Context context) { return azureFileStorageClient.files().deleteWithRestResponseAsync(shareName, filePath, context) .map(VoidResponse::new); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return {@link FileProperties Storage file properties} */ public Mono<FileProperties> getProperties() { return getPropertiesWithResponse().flatMap(FluxUtil::toMono); } /** * Retrieves the properties of the storage account's file. * The properties includes file metadata, last modified date, is server encrypted, and eTag. 
* * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getPropertiesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response containing the {@link FileProperties storage file properties} and response status code */ public Mono<Response<FileProperties>> getPropertiesWithResponse() { return withContext(context -> getPropertiesWithResponse(context)); } Mono<Response<FileProperties>> getPropertiesWithResponse(Context context) { return azureFileStorageClient.files().getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, context) .map(this::getPropertiesResponse); } /** * Sets the user-defined httpHeaders to associate to the file. * * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return The {@link FileInfo file info} * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<FileInfo> setHttpHeaders(long newFileSize, FileHTTPHeaders httpHeaders) { return setHttpHeadersWithResponse(newFileSize, httpHeaders).flatMap(FluxUtil::toMono); } /** * Sets the user-defined httpHeaders to associate to the file. 
* * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return Response containing the {@link FileInfo file info} and response status code * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders) { return withContext(context -> setHttpHeadersWithResponse(newFileSize, httpHeaders, context)); } Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "preserve"; String fileLastWriteTime = "preserve"; return azureFileStorageClient.files().setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime, fileLastWriteTime, null, newFileSize, filePermission, null, httpHeaders, context) .map(this::setHttpHeadersResponse); } /** * Sets the user-defined metadata to associate to the file. 
* * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return {@link FileMetadataInfo file meta info} * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<FileMetadataInfo> setMetadata(Map<String, String> metadata) { return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return A response containing the {@link FileMetadataInfo file meta info} and status code * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata) { return withContext(context -> setMetadataWithResponse(metadata, context)); } Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata, Context context) { return 
azureFileStorageClient.files().setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, context) .map(this::setMetadataResponse); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length) { return uploadWithResponse(data, length).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload "default" to the file. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero.. 
* @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length) { return withContext(context -> uploadWithResponse(data, length, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, Context context) { FileRange range = new FileRange(0, length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, context) .map(this::uploadResponse); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" starting from 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return The {@link FileUploadInfo file upload info} * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length, long offset) { return uploadWithResponse(data, length, offset).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place write on the specified file. 
* * <p><strong>Code Samples</strong></p> * * <p>Upload the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code null} * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero. * @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset) { return withContext(context -> uploadWithResponse(data, length, offset, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset, Context context) { FileRange range = * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared. * @return The {@link FileUploadInfo file upload info} */ public Mono<FileUploadInfo> clearRange(long length) { return clearRangeWithResponse(length, 0).flatMap(FluxUtil::toMono); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. 
* @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return A response of {@link FileUploadInfo file upload info} that only contains headers and response status code */ public Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset) { return withContext(context -> clearRangeWithResponse(length, offset, context)); } Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset, Context context) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR, 0L, null, null, null, Context.NONE) .map(this::uploadResponse); } /** * Uploads file to storage file service. * * <p><strong>Code Samples</strong></p> * * <p> Upload the file from the source file path. </p> * * (@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile
class FileAsyncClient { private final ClientLogger logger = new ClientLogger(FileAsyncClient.class); private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L; private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300; private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String filePath; private final String snapshot; /** * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param filePath Path to the file * @param snapshot The snapshot of the share */ FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Creates a FileAsyncClient that sends requests to the storage account at {@code endpoint}. Each service call goes * through the {@code httpPipeline}. * * @param endpoint URL for the Storage File service * @param httpPipeline HttpPipeline that HTTP requests and response flow through * @param shareName Name of the share * @param filePath Path to the file * @param snapshot Optional snapshot of the share */ FileAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * Get the url of the storage file client. 
* * @return the URL of the storage file client * @throws RuntimeException If the file is using a malformed URL. */ public URL getFileUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with size 1KB.</p> * * {@codesnippet com.azure.storage.file.fileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return A response containing the file info and the status of creating the file. * @throws StorageErrorException If the file has already existed, the parent directory does not exist or fileName is * an invalid resource name. */ public Mono<FileInfo> create(long maxSize) { return createWithResponse(maxSize, null, null).flatMap(FluxUtil::toMono); } /** * Creates a file in the storage account and returns a response of FileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers and metadata.</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders Additional parameters for the operation. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return A response containing the {@link FileInfo file info} and the status of creating the file. * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or directory is an invalid resource name. 
*/ public Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata) { return withContext(context -> createWithResponse(maxSize, httpHeaders, metadata, context)); } Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "now"; String fileLastWriteTime = "now"; return azureFileStorageClient.files().createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime, fileLastWriteTime, null, metadata, filePermission, null, httpHeaders, context) .map(this::createFileInfoResponse); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return The {@link FileCopyInfo file copy info}. * @see <a href="https: */ public Mono<FileCopyInfo> startCopy(String sourceUrl, Map<String, String> metadata) { return startCopyWithResponse(sourceUrl, metadata).flatMap(FluxUtil::toMono); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. 
Metadata names must adhere to the naming rules. * @return A response containing the {@link FileCopyInfo file copy info} and the status of copying the file. * @see <a href="https: */ public Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata) { return withContext(context -> startCopyWithResponse(sourceUrl, metadata, context)); } Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata, Context context) { return azureFileStorageClient.files().startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, context) .map(this::startCopyResponse); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return An empty response. */ public Mono<Void> abortCopy(String copyId) { return abortCopyWithResponse(copyId).flatMap(FluxUtil::toMono); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return A response containing the status of aborting copy the file. 
*/ public Mono<VoidResponse> abortCopyWithResponse(String copyId) { return withContext(context -> abortCopyWithResponse(copyId, context)); } Mono<VoidResponse> abortCopyWithResponse(String copyId, Context context) { return azureFileStorageClient.files().abortCopyWithRestResponseAsync(shareName, filePath, copyId, context) .map(VoidResponse::new); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath) { return downloadToFile(downloadFilePath, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath, FileRange range) { AsynchronousFileChannel channel = channelSetup(downloadFilePath); return sliceFileRange(range) .flatMap(chunk -> downloadWithPropertiesWithResponse(chunk, false) .map(dar -> dar.value().body()) .subscribeOn(Schedulers.elastic()) .flatMap(fbb -> FluxUtil.writeFile(fbb, channel, chunk.start() - (range == null ? 
0 : range.start())) .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))) .then() .doOnTerminate(() -> channelCleanUp(channel)); } private AsynchronousFileChannel channelSetup(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private void channelCleanUp(AsynchronousFileChannel channel) { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Flux<FileRange> sliceFileRange(FileRange fileRange) { long offset = fileRange == null ? 0L : fileRange.start(); Mono<Long> end; if (fileRange != null) { end = Mono.just(fileRange.end()); } else { end = Mono.empty(); } end = end.switchIfEmpty(getProperties().map(rb -> rb.contentLength())); return end .map(e -> { List<FileRange> chunks = new ArrayList<>(); for (long pos = offset; pos < e; pos += FILE_DEFAULT_BLOCK_SIZE) { long count = FILE_DEFAULT_BLOCK_SIZE; if (pos + count > e) { count = e - pos; } chunks.add(new FileRange(pos, pos + count - 1)); } return chunks; }) .flatMapMany(Flux::fromIterable); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. 
</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties} * * <p>For more information, see the * <a href="https: * * @return The {@link FileDownloadInfo file download Info} */ public Mono<FileDownloadInfo> downloadWithProperties() { return downloadWithPropertiesWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets * to true, as long as the range is less than or equal to 4 MB in size. * @return A response containing the {@link FileDownloadInfo file download Info} with headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5) { return withContext(context -> downloadWithPropertiesWithResponse(range, rangeGetContentMD5, context)); } Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5, Context context) { String rangeString = range == null ? null : range.toString(); return azureFileStorageClient.files().downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, context) .map(this::downloadWithPropertiesResponse); } /** * Deletes the file associate with the client. 
* * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} * * <p>For more information, see the * <a href="https: * @return An empty response * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<Void> delete() { return deleteWithResponse(null).flatMap(FluxUtil::toMono); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.deleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<VoidResponse> deleteWithResponse() { return withContext(context -> deleteWithResponse(context)); } Mono<VoidResponse> deleteWithResponse(Context context) { return azureFileStorageClient.files().deleteWithRestResponseAsync(shareName, filePath, context) .map(VoidResponse::new); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return {@link FileProperties Storage file properties} */ public Mono<FileProperties> getProperties() { return getPropertiesWithResponse().flatMap(FluxUtil::toMono); } /** * Retrieves the properties of the storage account's file. * The properties includes file metadata, last modified date, is server encrypted, and eTag. 
* * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getPropertiesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response containing the {@link FileProperties storage file properties} and response status code */ public Mono<Response<FileProperties>> getPropertiesWithResponse() { return withContext(context -> getPropertiesWithResponse(context)); } Mono<Response<FileProperties>> getPropertiesWithResponse(Context context) { return azureFileStorageClient.files().getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, context) .map(this::getPropertiesResponse); } /** * Sets the user-defined httpHeaders to associate to the file. * * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return The {@link FileInfo file info} * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<FileInfo> setHttpHeaders(long newFileSize, FileHTTPHeaders httpHeaders) { return setHttpHeadersWithResponse(newFileSize, httpHeaders).flatMap(FluxUtil::toMono); } /** * Sets the user-defined httpHeaders to associate to the file. 
* * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return Response containing the {@link FileInfo file info} and response status code * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders) { return withContext(context -> setHttpHeadersWithResponse(newFileSize, httpHeaders, context)); } Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "preserve"; String fileLastWriteTime = "preserve"; return azureFileStorageClient.files().setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime, fileLastWriteTime, null, newFileSize, filePermission, null, httpHeaders, context) .map(this::setHttpHeadersResponse); } /** * Sets the user-defined metadata to associate to the file. 
* * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return {@link FileMetadataInfo file meta info} * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<FileMetadataInfo> setMetadata(Map<String, String> metadata) { return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return A response containing the {@link FileMetadataInfo file meta info} and status code * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata) { return withContext(context -> setMetadataWithResponse(metadata, context)); } Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata, Context context) { return 
azureFileStorageClient.files().setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, context) .map(this::setMetadataResponse); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length) { return uploadWithResponse(data, length).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload "default" to the file. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero.. 
* @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length) { return withContext(context -> uploadWithResponse(data, length, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, Context context) { FileRange range = new FileRange(0, length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, context) .map(this::uploadResponse); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" starting from 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return The {@link FileUploadInfo file upload info} * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length, long offset) { return uploadWithResponse(data, length, offset).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place write on the specified file. 
* * <p><strong>Code Samples</strong></p> * * <p>Upload the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code null} * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero. * @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset) { return withContext(context -> uploadWithResponse(data, length, offset, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset, Context context) { FileRange range = * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared. * @return The {@link FileUploadInfo file upload info} */ public Mono<FileUploadInfo> clearRange(long length) { return clearRangeWithResponse(length, 0).flatMap(FluxUtil::toMono); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. 
* @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return A response of {@link FileUploadInfo file upload info} that only contains headers and response status code */ public Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset) { return withContext(context -> clearRangeWithResponse(length, offset, context)); } Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset, Context context) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR, 0L, null, null, null, context) .map(this::uploadResponse); } /** * Uploads file to storage file service. * * <p><strong>Code Samples</strong></p> * * <p> Upload the file from the source file path. </p> * * (@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile
Done.
Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset, Context context) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR, 0L, null, null, null, Context.NONE) .map(this::uploadResponse); }
return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR, 0L, null, null, null, Context.NONE)
new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, context) .map(this::uploadResponse); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clears the first 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange
class FileAsyncClient { private final ClientLogger logger = new ClientLogger(FileAsyncClient.class); private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L; private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300; private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String filePath; private final String snapshot; /** * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param filePath Path to the file * @param snapshot The snapshot of the share */ FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Creates a FileAsyncClient that sends requests to the storage account at {@code endpoint}. Each service call goes * through the {@code httpPipeline}. * * @param endpoint URL for the Storage File service * @param httpPipeline HttpPipeline that HTTP requests and response flow through * @param shareName Name of the share * @param filePath Path to the file * @param snapshot Optional snapshot of the share */ FileAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * Get the url of the storage file client. 
* * @return the URL of the storage file client * @throws RuntimeException If the file is using a malformed URL. */ public URL getFileUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with size 1KB.</p> * * {@codesnippet com.azure.storage.file.fileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return A response containing the file info and the status of creating the file. * @throws StorageErrorException If the file has already existed, the parent directory does not exist or fileName is * an invalid resource name. */ public Mono<FileInfo> create(long maxSize) { return createWithResponse(maxSize, null, null).flatMap(FluxUtil::toMono); } /** * Creates a file in the storage account and returns a response of FileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers and metadata.</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders Additional parameters for the operation. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return A response containing the {@link FileInfo file info} and the status of creating the file. * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or directory is an invalid resource name. 
*/ public Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata) { return withContext(context -> createWithResponse(maxSize, httpHeaders, metadata, context)); } Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "now"; String fileLastWriteTime = "now"; return azureFileStorageClient.files().createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime, fileLastWriteTime, null, metadata, filePermission, null, httpHeaders, context) .map(this::createFileInfoResponse); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return The {@link FileCopyInfo file copy info}. * @see <a href="https: */ public Mono<FileCopyInfo> startCopy(String sourceUrl, Map<String, String> metadata) { return startCopyWithResponse(sourceUrl, metadata).flatMap(FluxUtil::toMono); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. 
Metadata names must adhere to the naming rules. * @return A response containing the {@link FileCopyInfo file copy info} and the status of copying the file. * @see <a href="https: */ public Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata) { return withContext(context -> startCopyWithResponse(sourceUrl, metadata, context)); } Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata, Context context) { return azureFileStorageClient.files().startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, context) .map(this::startCopyResponse); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return An empty response. */ public Mono<Void> abortCopy(String copyId) { return abortCopyWithResponse(copyId).flatMap(FluxUtil::toMono); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return A response containing the status of aborting copy the file. 
*/ public Mono<VoidResponse> abortCopyWithResponse(String copyId) { return withContext(context -> abortCopyWithResponse(copyId, context)); } Mono<VoidResponse> abortCopyWithResponse(String copyId, Context context) { return azureFileStorageClient.files().abortCopyWithRestResponseAsync(shareName, filePath, copyId, context) .map(VoidResponse::new); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath) { return downloadToFile(downloadFilePath, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath, FileRange range) { AsynchronousFileChannel channel = channelSetup(downloadFilePath); return sliceFileRange(range) .flatMap(chunk -> downloadWithPropertiesWithResponse(chunk, false) .map(dar -> dar.value().body()) .subscribeOn(Schedulers.elastic()) .flatMap(fbb -> FluxUtil.writeFile(fbb, channel, chunk.start() - (range == null ? 
0 : range.start())) .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))) .then() .doOnTerminate(() -> channelCleanUp(channel)); } private AsynchronousFileChannel channelSetup(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private void channelCleanUp(AsynchronousFileChannel channel) { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Flux<FileRange> sliceFileRange(FileRange fileRange) { long offset = fileRange == null ? 0L : fileRange.start(); Mono<Long> end; if (fileRange != null) { end = Mono.just(fileRange.end()); } else { end = Mono.empty(); } end = end.switchIfEmpty(getProperties().map(rb -> rb.contentLength())); return end .map(e -> { List<FileRange> chunks = new ArrayList<>(); for (long pos = offset; pos < e; pos += FILE_DEFAULT_BLOCK_SIZE) { long count = FILE_DEFAULT_BLOCK_SIZE; if (pos + count > e) { count = e - pos; } chunks.add(new FileRange(pos, pos + count - 1)); } return chunks; }) .flatMapMany(Flux::fromIterable); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. 
</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties} * * <p>For more information, see the * <a href="https: * * @return The {@link FileDownloadInfo file download Info} */ public Mono<FileDownloadInfo> downloadWithProperties() { return downloadWithPropertiesWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets * to true, as long as the range is less than or equal to 4 MB in size. * @return A response containing the {@link FileDownloadInfo file download Info} with headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5) { return withContext(context -> downloadWithPropertiesWithResponse(range, rangeGetContentMD5, context)); } Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5, Context context) { String rangeString = range == null ? null : range.toString(); return azureFileStorageClient.files().downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, context) .map(this::downloadWithPropertiesResponse); } /** * Deletes the file associate with the client. 
* * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} * * <p>For more information, see the * <a href="https: * @return An empty response * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<Void> delete() { return deleteWithResponse(null).flatMap(FluxUtil::toMono); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.deleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<VoidResponse> deleteWithResponse() { return withContext(context -> deleteWithResponse(context)); } Mono<VoidResponse> deleteWithResponse(Context context) { return azureFileStorageClient.files().deleteWithRestResponseAsync(shareName, filePath, context) .map(VoidResponse::new); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return {@link FileProperties Storage file properties} */ public Mono<FileProperties> getProperties() { return getPropertiesWithResponse().flatMap(FluxUtil::toMono); } /** * Retrieves the properties of the storage account's file. * The properties includes file metadata, last modified date, is server encrypted, and eTag. 
* * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getPropertiesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response containing the {@link FileProperties storage file properties} and response status code */ public Mono<Response<FileProperties>> getPropertiesWithResponse() { return withContext(context -> getPropertiesWithResponse(context)); } Mono<Response<FileProperties>> getPropertiesWithResponse(Context context) { return azureFileStorageClient.files().getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, context) .map(this::getPropertiesResponse); } /** * Sets the user-defined httpHeaders to associate to the file. * * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return The {@link FileInfo file info} * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<FileInfo> setHttpHeaders(long newFileSize, FileHTTPHeaders httpHeaders) { return setHttpHeadersWithResponse(newFileSize, httpHeaders).flatMap(FluxUtil::toMono); } /** * Sets the user-defined httpHeaders to associate to the file. 
* * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return Response containing the {@link FileInfo file info} and response status code * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders) { return withContext(context -> setHttpHeadersWithResponse(newFileSize, httpHeaders, context)); } Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "preserve"; String fileLastWriteTime = "preserve"; return azureFileStorageClient.files().setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime, fileLastWriteTime, null, newFileSize, filePermission, null, httpHeaders, context) .map(this::setHttpHeadersResponse); } /** * Sets the user-defined metadata to associate to the file. 
* * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return {@link FileMetadataInfo file meta info} * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<FileMetadataInfo> setMetadata(Map<String, String> metadata) { return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return A response containing the {@link FileMetadataInfo file meta info} and status code * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata) { return withContext(context -> setMetadataWithResponse(metadata, context)); } Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata, Context context) { return 
azureFileStorageClient.files().setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, context) .map(this::setMetadataResponse); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length) { return uploadWithResponse(data, length).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload "default" to the file. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero.. 
* @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length) { return withContext(context -> uploadWithResponse(data, length, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, Context context) { FileRange range = new FileRange(0, length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, context) .map(this::uploadResponse); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" starting from 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return The {@link FileUploadInfo file upload info} * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length, long offset) { return uploadWithResponse(data, length, offset).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place write on the specified file. 
* * <p><strong>Code Samples</strong></p> * * <p>Upload the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code null} * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero. * @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset) { return withContext(context -> uploadWithResponse(data, length, offset, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset, Context context) { FileRange range = * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared. * @return The {@link FileUploadInfo file upload info} */ public Mono<FileUploadInfo> clearRange(long length) { return clearRangeWithResponse(length, 0).flatMap(FluxUtil::toMono); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. 
* @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return A response of {@link FileUploadInfo file upload info} that only contains headers and response status code */ public Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset) { return withContext(context -> clearRangeWithResponse(length, offset, context)); } Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset, Context context) { FileRange range = new FileRange(offset, offset + length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR, 0L, null, null, null, Context.NONE) .map(this::uploadResponse); } /** * Uploads file to storage file service. * * <p><strong>Code Samples</strong></p> * * <p> Upload the file from the source file path. </p> * * (@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile
class FileAsyncClient {
    private final ClientLogger logger = new ClientLogger(FileAsyncClient.class);
    // Chunk size used when slicing a download into ranged requests (4 MiB, the service's range-upload limit).
    private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L;
    // Per-chunk timeout (seconds) applied to each download/upload chunk before retrying.
    private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300;
    // Generated protocol-layer client that performs the actual REST calls.
    private final AzureFileStorageImpl azureFileStorageClient;
    private final String shareName;
    private final String filePath;
    // Optional share snapshot identifier; null when targeting the live share.
    private final String snapshot;

    /**
     * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl
     * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}.
     *
     * @param azureFileStorageClient Client that interacts with the service interfaces
     * @param shareName Name of the share
     * @param filePath Path to the file
     * @param snapshot The snapshot of the share
     */
    FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) {
        Objects.requireNonNull(shareName);
        Objects.requireNonNull(filePath);
        this.shareName = shareName;
        this.filePath = filePath;
        this.snapshot = snapshot;
        // Reuses the caller's already-built protocol client (and its pipeline) as-is.
        this.azureFileStorageClient = azureFileStorageClient;
    }

    /**
     * Creates a FileAsyncClient that sends requests to the storage account at {@code endpoint}. Each service call goes
     * through the {@code httpPipeline}.
     *
     * @param endpoint URL for the Storage File service
     * @param httpPipeline HttpPipeline that HTTP requests and response flow through
     * @param shareName Name of the share
     * @param filePath Path to the file
     * @param snapshot Optional snapshot of the share
     */
    FileAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String filePath, String snapshot) {
        Objects.requireNonNull(shareName);
        Objects.requireNonNull(filePath);
        this.shareName = shareName;
        this.filePath = filePath;
        this.snapshot = snapshot;
        // Builds a fresh protocol client around the supplied pipeline and endpoint.
        this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline)
            .url(endpoint.toString())
            .build();
    }

    /**
     * Get the url of the storage file client.
* * @return the URL of the storage file client * @throws RuntimeException If the file is using a malformed URL. */ public URL getFileUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with size 1KB.</p> * * {@codesnippet com.azure.storage.file.fileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return A response containing the file info and the status of creating the file. * @throws StorageErrorException If the file has already existed, the parent directory does not exist or fileName is * an invalid resource name. */ public Mono<FileInfo> create(long maxSize) { return createWithResponse(maxSize, null, null).flatMap(FluxUtil::toMono); } /** * Creates a file in the storage account and returns a response of FileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers and metadata.</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders Additional parameters for the operation. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return A response containing the {@link FileInfo file info} and the status of creating the file. * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or directory is an invalid resource name. 
*/ public Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata) { return withContext(context -> createWithResponse(maxSize, httpHeaders, metadata, context)); } Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "now"; String fileLastWriteTime = "now"; return azureFileStorageClient.files().createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime, fileLastWriteTime, null, metadata, filePermission, null, httpHeaders, context) .map(this::createFileInfoResponse); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return The {@link FileCopyInfo file copy info}. * @see <a href="https: */ public Mono<FileCopyInfo> startCopy(String sourceUrl, Map<String, String> metadata) { return startCopyWithResponse(sourceUrl, metadata).flatMap(FluxUtil::toMono); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. 
Metadata names must adhere to the naming rules. * @return A response containing the {@link FileCopyInfo file copy info} and the status of copying the file. * @see <a href="https: */ public Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata) { return withContext(context -> startCopyWithResponse(sourceUrl, metadata, context)); } Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata, Context context) { return azureFileStorageClient.files().startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, context) .map(this::startCopyResponse); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return An empty response. */ public Mono<Void> abortCopy(String copyId) { return abortCopyWithResponse(copyId).flatMap(FluxUtil::toMono); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return A response containing the status of aborting copy the file. 
*/ public Mono<VoidResponse> abortCopyWithResponse(String copyId) { return withContext(context -> abortCopyWithResponse(copyId, context)); } Mono<VoidResponse> abortCopyWithResponse(String copyId, Context context) { return azureFileStorageClient.files().abortCopyWithRestResponseAsync(shareName, filePath, copyId, context) .map(VoidResponse::new); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath) { return downloadToFile(downloadFilePath, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath, FileRange range) { AsynchronousFileChannel channel = channelSetup(downloadFilePath); return sliceFileRange(range) .flatMap(chunk -> downloadWithPropertiesWithResponse(chunk, false) .map(dar -> dar.value().body()) .subscribeOn(Schedulers.elastic()) .flatMap(fbb -> FluxUtil.writeFile(fbb, channel, chunk.start() - (range == null ? 
0 : range.start())) .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))) .then() .doOnTerminate(() -> channelCleanUp(channel)); } private AsynchronousFileChannel channelSetup(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private void channelCleanUp(AsynchronousFileChannel channel) { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Flux<FileRange> sliceFileRange(FileRange fileRange) { long offset = fileRange == null ? 0L : fileRange.start(); Mono<Long> end; if (fileRange != null) { end = Mono.just(fileRange.end()); } else { end = Mono.empty(); } end = end.switchIfEmpty(getProperties().map(rb -> rb.contentLength())); return end .map(e -> { List<FileRange> chunks = new ArrayList<>(); for (long pos = offset; pos < e; pos += FILE_DEFAULT_BLOCK_SIZE) { long count = FILE_DEFAULT_BLOCK_SIZE; if (pos + count > e) { count = e - pos; } chunks.add(new FileRange(pos, pos + count - 1)); } return chunks; }) .flatMapMany(Flux::fromIterable); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. 
</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties} * * <p>For more information, see the * <a href="https: * * @return The {@link FileDownloadInfo file download Info} */ public Mono<FileDownloadInfo> downloadWithProperties() { return downloadWithPropertiesWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets * to true, as long as the range is less than or equal to 4 MB in size. * @return A response containing the {@link FileDownloadInfo file download Info} with headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5) { return withContext(context -> downloadWithPropertiesWithResponse(range, rangeGetContentMD5, context)); } Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5, Context context) { String rangeString = range == null ? null : range.toString(); return azureFileStorageClient.files().downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, context) .map(this::downloadWithPropertiesResponse); } /** * Deletes the file associate with the client. 
* * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} * * <p>For more information, see the * <a href="https: * @return An empty response * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<Void> delete() { return deleteWithResponse(null).flatMap(FluxUtil::toMono); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.deleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. */ public Mono<VoidResponse> deleteWithResponse() { return withContext(context -> deleteWithResponse(context)); } Mono<VoidResponse> deleteWithResponse(Context context) { return azureFileStorageClient.files().deleteWithRestResponseAsync(shareName, filePath, context) .map(VoidResponse::new); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return {@link FileProperties Storage file properties} */ public Mono<FileProperties> getProperties() { return getPropertiesWithResponse().flatMap(FluxUtil::toMono); } /** * Retrieves the properties of the storage account's file. * The properties includes file metadata, last modified date, is server encrypted, and eTag. 
* * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getPropertiesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response containing the {@link FileProperties storage file properties} and response status code */ public Mono<Response<FileProperties>> getPropertiesWithResponse() { return withContext(context -> getPropertiesWithResponse(context)); } Mono<Response<FileProperties>> getPropertiesWithResponse(Context context) { return azureFileStorageClient.files().getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, context) .map(this::getPropertiesResponse); } /** * Sets the user-defined httpHeaders to associate to the file. * * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return The {@link FileInfo file info} * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<FileInfo> setHttpHeaders(long newFileSize, FileHTTPHeaders httpHeaders) { return setHttpHeadersWithResponse(newFileSize, httpHeaders).flatMap(FluxUtil::toMono); } /** * Sets the user-defined httpHeaders to associate to the file. 
* * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. * @return Response containing the {@link FileInfo file info} and response status code * @throws IllegalArgumentException thrown if parameters fail the validation. */ public Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders) { return withContext(context -> setHttpHeadersWithResponse(newFileSize, httpHeaders, context)); } Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "preserve"; String fileLastWriteTime = "preserve"; return azureFileStorageClient.files().setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime, fileLastWriteTime, null, newFileSize, filePermission, null, httpHeaders, context) .map(this::setHttpHeadersResponse); } /** * Sets the user-defined metadata to associate to the file. 
* * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return {@link FileMetadataInfo file meta info} * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<FileMetadataInfo> setMetadata(Map<String, String> metadata) { return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return A response containing the {@link FileMetadataInfo file meta info} and status code * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys */ public Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata) { return withContext(context -> setMetadataWithResponse(metadata, context)); } Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata, Context context) { return 
azureFileStorageClient.files().setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, context) .map(this::setMetadataResponse); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @return A response that only contains headers and response status code */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length) { return uploadWithResponse(data, length).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload "default" to the file. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero.. 
* @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length) { return withContext(context -> uploadWithResponse(data, length, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, Context context) { FileRange range = new FileRange(0, length - 1); return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE, length, data, null, null, context) .map(this::uploadResponse); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" starting from 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code * null} * @return The {@link FileUploadInfo file upload info} * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length, long offset) { return uploadWithResponse(data, length, offset).flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place write on the specified file. 
* * <p><strong>Code Samples</strong></p> * * <p>Upload the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code null} * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero. * @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) */ public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset) { return withContext(context -> uploadWithResponse(data, length, offset, context)); } Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset, Context context) { FileRange range = * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared. * @return The {@link FileUploadInfo file upload info} */ public Mono<FileUploadInfo> clearRange(long length) { return clearRangeWithResponse(length, 0).flatMap(FluxUtil::toMono); } /** * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared in the request body. 
 * @param offset Optional starting point of the upload range. It will start from the beginning if it is {@code
 * null}
 * @return A response of {@link FileUploadInfo file upload info} that only contains headers and response status code
 */
public Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset) {
    return withContext(context -> clearRangeWithResponse(length, offset, context));
}

// Clears (zeroes) the byte range [offset, offset + length - 1]. The content-length of
// the request body is 0 because a CLEAR write sends no data; the caller's context is
// forwarded so per-call pipeline data is preserved.
Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset, Context context) {
    FileRange range = new FileRange(offset, offset + length - 1);
    return azureFileStorageClient.files().uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(),
        FileRangeWriteType.CLEAR, 0L, null, null, null, context)
        .map(this::uploadResponse);
}

/**
 * Uploads file to storage file service.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p> Upload the file from the source file path. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile
Can this change be undone? The comments do not appear to be at a consistent indentation level.
/**
 * Opens an exclusive consumer for the partition described by {@code partitionOwnership}
 * and pumps its events through a freshly created {@link PartitionProcessor}.
 *
 * @param partitionOwnership Ownership record for the partition this instance has claimed;
 * its sequence number (when present) is the checkpoint to resume from.
 */
private void receiveEvents(PartitionOwnership partitionOwnership) {
    EventHubConsumerOptions consumerOptions = new EventHubConsumerOptions();
    // Owner level 0 requests an epoch (exclusive) consumer so a competing pump for the
    // same partition is disconnected by the service.
    consumerOptions.ownerLevel(0L);

    // Resume after the last checkpointed sequence number; fall back to the configured
    // initial position when this partition has no checkpoint yet.
    EventPosition startFromEventPosition = partitionOwnership.sequenceNumber() == null
        ? this.initialEventPosition
        : EventPosition.fromSequenceNumber(partitionOwnership.sequenceNumber(), false);

    EventHubAsyncConsumer consumer = this.eventHubAsyncClient
        .createConsumer(this.consumerGroupName, partitionOwnership.partitionId(), startFromEventPosition,
            consumerOptions);
    // Track the consumer so it can be closed when the processor stops or loses ownership.
    this.partitionConsumers.put(partitionOwnership.partitionId(), consumer);

    PartitionContext partitionContext = new PartitionContext(partitionOwnership.partitionId(), this.eventHubName,
        this.consumerGroupName);
    CheckpointManager checkpointManager = new CheckpointManager(this.identifier, partitionContext,
        this.partitionManager, null);
    logger.info("Subscribing to receive events from partition {}", partitionOwnership.partitionId());
    PartitionProcessor partitionProcessor = this.partitionProcessorFactory
        .createPartitionProcessor(partitionContext, checkpointManager);
    partitionProcessor.initialize().subscribe();

    final AtomicReference<Context> processSpanContext = new AtomicReference<>(Context.NONE);
    consumer.receive().subscribeOn(Schedulers.newElastic("PartitionPump"))
        .subscribe(eventData -> {
            startScopedTracingSpan(eventData, processSpanContext);
            // FIX: processEvent(...) is asynchronous — the span previously ended
            // immediately after subscribing, i.e. before processing actually finished.
            // End the span only when the processEvent Mono terminates (success, error,
            // or cancellation) by hooking doFinally on the pipeline.
            partitionProcessor.processEvent(eventData)
                .doFinally(signal -> endScopedTracingSpan(processSpanContext))
                .subscribe(unused -> { }, partitionProcessor::processError);
        }, partitionProcessor::processError,
            () -> partitionProcessor.close(CloseReason.LOST_PARTITION_OWNERSHIP));
}
// Opens an exclusive consumer for the partition in partitionOwnership and pumps its
// events through a new PartitionProcessor, wrapping each event in a tracing span.
private void receiveEvents(PartitionOwnership partitionOwnership) {
    EventHubConsumerOptions consumerOptions = new EventHubConsumerOptions();
    // Owner level 0 requests an epoch (exclusive) consumer so a competing pump loses the partition.
    consumerOptions.ownerLevel(0L);
    // Resume after the checkpointed sequence number, or from the configured initial
    // position when the partition has never been checkpointed.
    EventPosition startFromEventPosition = partitionOwnership.sequenceNumber() == null ? this.initialEventPosition
        : EventPosition.fromSequenceNumber(partitionOwnership.sequenceNumber(), false);
    EventHubAsyncConsumer consumer = this.eventHubAsyncClient
        .createConsumer(this.consumerGroupName, partitionOwnership.partitionId(), startFromEventPosition,
            consumerOptions);
    // Track the consumer so it can be closed when the processor stops or ownership is lost.
    this.partitionConsumers.put(partitionOwnership.partitionId(), consumer);
    PartitionContext partitionContext = new PartitionContext(partitionOwnership.partitionId(), this.eventHubName,
        this.consumerGroupName);
    CheckpointManager checkpointManager = new CheckpointManager(this.identifier, partitionContext,
        this.partitionManager, null);
    logger.info("Subscribing to receive events from partition {}", partitionOwnership.partitionId());
    PartitionProcessor partitionProcessor = this.partitionProcessorFactory
        .createPartitionProcessor(partitionContext, checkpointManager);
    partitionProcessor.initialize().subscribe();
    consumer.receive().subscribeOn(Schedulers.newElastic("PartitionPump"))
        .subscribe(eventData -> {
            // Start a per-event processing span; if the tracer produced a span context,
            // attach it to the event so downstream links can correlate.
            Context processSpanContext = startProcessTracingSpan(eventData);
            if (processSpanContext.getData(SPAN_CONTEXT).isPresent()) {
                eventData.addContext(SPAN_CONTEXT, processSpanContext);
            }
            // The span is ended via doOnEach when the async processEvent pipeline emits
            // its terminal signal — not synchronously — so span duration covers the
            // actual processing.
            partitionProcessor.processEvent(eventData).doOnEach(signal ->
                endProcessTracingSpan(processSpanContext, signal)).subscribe(unused -> {
                }, partitionProcessor::processError);
        }, partitionProcessor::processError,
            () -> partitionProcessor.close(CloseReason.LOST_PARTITION_OWNERSHIP));
}
class EventProcessor { private static final long INTERVAL_IN_SECONDS = 10; private static final long INITIAL_DELAY = 0; private static final long OWNERSHIP_EXPIRATION_TIME_IN_MILLIS = TimeUnit.SECONDS.toMillis(30); private static final String SPAN_CONTEXT = Tracer.OPENTELEMETRY_AMQP_EVENT_SPAN_CONTEXT; private static final String DIAGNOSTIC_ID = com.azure.core.implementation.tracing.Tracer.OPENTELEMETRY_DIAGNOSTIC_ID_KEY; private final ClientLogger logger = new ClientLogger(EventProcessor.class); private final EventHubAsyncClient eventHubAsyncClient; private final String consumerGroupName; private final EventPosition initialEventPosition; private final PartitionProcessorFactory partitionProcessorFactory; private final PartitionManager partitionManager; private final String identifier; private final Map<String, EventHubAsyncConsumer> partitionConsumers = new ConcurrentHashMap<>(); private final String eventHubName; private final AtomicBoolean started = new AtomicBoolean(false); private Disposable runner; private Scheduler scheduler; /** * Package-private constructor. Use {@link EventHubClientBuilder} to create an instance. * * @param eventHubAsyncClient The {@link EventHubAsyncClient}. * @param consumerGroupName The consumer group name used in this event processor to consumer events. * @param partitionProcessorFactory The factory to create new partition processor(s). * @param initialEventPosition Initial event position to start consuming events. * @param partitionManager The partition manager. * @param eventHubName The Event Hub name. 
*/ EventProcessor(EventHubAsyncClient eventHubAsyncClient, String consumerGroupName, PartitionProcessorFactory partitionProcessorFactory, EventPosition initialEventPosition, PartitionManager partitionManager, String eventHubName) { this.eventHubAsyncClient = Objects .requireNonNull(eventHubAsyncClient, "eventHubAsyncClient cannot be null"); this.consumerGroupName = Objects .requireNonNull(consumerGroupName, "consumerGroupname cannot be null"); this.partitionProcessorFactory = Objects .requireNonNull(partitionProcessorFactory, "partitionProcessorFactory cannot be null"); this.partitionManager = Objects .requireNonNull(partitionManager, "partitionManager cannot be null"); this.initialEventPosition = Objects .requireNonNull(initialEventPosition, "initialEventPosition cannot be null"); this.eventHubName = Objects .requireNonNull(eventHubName, "eventHubName cannot be null"); this.identifier = UUID.randomUUID().toString(); logger.info("The instance ID for this event processors is {}", this.identifier); } /** * The identifier is a unique name given to this event processor instance. * * @return Identifier for this event processor. */ public String identifier() { return this.identifier; } /** * Starts processing of events for all partitions of the Event Hub that this event processor can own, assigning a * dedicated {@link PartitionProcessor} to each partition. If there are other Event Processors active for the same * consumer group on the Event Hub, responsibility for partitions will be shared between them. * <p> * Subsequent calls to start will be ignored if this event processor is already running. 
Calling start after {@link * * </p> * * <p><strong>Starting the processor to consume events from all partitions</strong></p> * {@codesnippet com.azure.messaging.eventhubs.eventprocessor.startstop} */ public synchronized void start() { if (!started.compareAndSet(false, true)) { logger.info("Event processor is already running"); return; } logger.info("Starting a new event processor instance with id {}", this.identifier); scheduler = Schedulers.newElastic("EventProcessor"); runner = scheduler.schedulePeriodically(this::run, INITIAL_DELAY, INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } /** * Stops processing events for all partitions owned by this event processor. All {@link PartitionProcessor} will be * shutdown and any open resources will be closed. * <p> * Subsequent calls to stop will be ignored if the event processor is not running. * </p> * * <p><strong>Stopping the processor</strong></p> * {@codesnippet com.azure.messaging.eventhubs.eventprocessor.startstop} */ public synchronized void stop() { if (!started.compareAndSet(true, false)) { logger.info("Event processor has already stopped"); return; } this.partitionConsumers.forEach((key, value) -> { try { logger.info("Closing event hub consumer for partition {}", key); value.close(); logger.info("Closed event hub consumer for partition {}", key); partitionConsumers.remove(key); } catch (IOException ex) { logger.warning("Unable to close event hub consumer for partition {}", key); } }); runner.dispose(); scheduler.dispose(); } /* * A simple implementation of an event processor that: * 1. Fetches all partition ids from Event Hub * 2. Gets the current ownership information of all the partitions from PartitionManager * 3. Claims ownership of any partition that doesn't have an owner yet. * 4. 
Starts a new PartitionProcessor and receives events from each of the partitions this instance owns */ private void run() { /* This will run periodically to get new ownership details and close/open new consumers when ownership of this instance has changed */ final Flux<PartitionOwnership> ownershipFlux = partitionManager.listOwnership(eventHubName, consumerGroupName) .cache(); eventHubAsyncClient.getPartitionIds() .flatMap(id -> getCandidatePartitions(ownershipFlux, id)) .flatMap(this::claimOwnership) .subscribe(this::receiveEvents, ex -> logger.warning("Failed to receive events {}", ex.getMessage()), () -> logger.info("Completed starting partition pumps for new partitions owned")); } /* * Get the candidate partitions for claiming ownerships */ private Publisher<? extends PartitionOwnership> getCandidatePartitions(Flux<PartitionOwnership> ownershipFlux, String id) { return ownershipFlux .filter(ownership -> id.equals(ownership.partitionId())) .single(new PartitionOwnership() .partitionId(id) .eventHubName(this.eventHubName) .ownerId(this.identifier) .consumerGroupName(this.consumerGroupName) .ownerLevel(0L)); } /* * Claim ownership of the given partition if it's available */ private Publisher<? extends PartitionOwnership> claimOwnership(PartitionOwnership ownershipInfo) { if (ownershipInfo.lastModifiedTime() == null || (System.currentTimeMillis() - ownershipInfo.lastModifiedTime() > OWNERSHIP_EXPIRATION_TIME_IN_MILLIS && !ownershipInfo.ownerId().equals(this.identifier))) { ownershipInfo.ownerId(this.identifier); return partitionManager.claimOwnership(ownershipInfo).doOnComplete(() -> { logger.info("Claimed ownership of partition {}", ownershipInfo.partitionId()); }).doOnError(error -> { logger.error("Unable to claim ownership of partition {}", ownershipInfo.partitionId(), error); }); } else { return Flux.empty(); } } /* * Creates a new consumer for given partition and starts receiving events for that partition. 
*/ /* * Starts a new process tracing span and attached context the EventData object for users. */ private void startScopedTracingSpan(EventData eventData, AtomicReference<Context> processSpanContext) { Object diagnosticId = eventData.properties().get(DIAGNOSTIC_ID); if (diagnosticId == null) { return; } eventData.context(TraceUtil.extractContext(diagnosticId.toString())); processSpanContext.set(TraceUtil.startScopedSpan("process", eventData.context())); eventData.context(processSpanContext.get()); } /* * Ends the tracing span and the scope of that span. */ private void endScopedTracingSpan(AtomicReference<Context> processSpanContext) { if (!processSpanContext.get().getData("scope").isPresent()) { return; } Closeable close = (Closeable) processSpanContext.get().getData("scope").get(); try { close.close(); } catch (IOException ioException) { logger.error("EventProcessor.run() endTracingSpan().close() failed with an error %s", ioException); } TraceUtil.endTracingSpan(processSpanContext.get(), null); } }
class EventProcessor { private static final long INTERVAL_IN_SECONDS = 10; private static final long INITIAL_DELAY = 0; private static final long OWNERSHIP_EXPIRATION_TIME_IN_MILLIS = TimeUnit.SECONDS.toMillis(30); private final ClientLogger logger = new ClientLogger(EventProcessor.class); private final EventHubAsyncClient eventHubAsyncClient; private final String consumerGroupName; private final EventPosition initialEventPosition; private final PartitionProcessorFactory partitionProcessorFactory; private final PartitionManager partitionManager; private final String identifier; private final Map<String, EventHubAsyncConsumer> partitionConsumers = new ConcurrentHashMap<>(); private final String eventHubName; private final TracerProvider tracerProvider; private final AtomicBoolean started = new AtomicBoolean(false); private Disposable runner; private Scheduler scheduler; /** * Package-private constructor. Use {@link EventHubClientBuilder} to create an instance. * @param eventHubAsyncClient The {@link EventHubAsyncClient}. * @param consumerGroupName The consumer group name used in this event processor to consumer events. * @param partitionProcessorFactory The factory to create new partition processor(s). * @param initialEventPosition Initial event position to start consuming events. * @param partitionManager The partition manager. * @param eventHubName The Event Hub name. 
* @param tracerProvider The tracer implementation */ EventProcessor(EventHubAsyncClient eventHubAsyncClient, String consumerGroupName, PartitionProcessorFactory partitionProcessorFactory, EventPosition initialEventPosition, PartitionManager partitionManager, String eventHubName, TracerProvider tracerProvider) { this.eventHubAsyncClient = Objects .requireNonNull(eventHubAsyncClient, "eventHubAsyncClient cannot be null"); this.consumerGroupName = Objects .requireNonNull(consumerGroupName, "consumerGroupname cannot be null"); this.partitionProcessorFactory = Objects .requireNonNull(partitionProcessorFactory, "partitionProcessorFactory cannot be null"); this.partitionManager = Objects .requireNonNull(partitionManager, "partitionManager cannot be null"); this.initialEventPosition = Objects .requireNonNull(initialEventPosition, "initialEventPosition cannot be null"); this.eventHubName = Objects .requireNonNull(eventHubName, "eventHubName cannot be null"); this.tracerProvider = tracerProvider; this.identifier = UUID.randomUUID().toString(); logger.info("The instance ID for this event processors is {}", this.identifier); } /** * The identifier is a unique name given to this event processor instance. * * @return Identifier for this event processor. */ public String identifier() { return this.identifier; } /** * Starts processing of events for all partitions of the Event Hub that this event processor can own, assigning a * dedicated {@link PartitionProcessor} to each partition. If there are other Event Processors active for the same * consumer group on the Event Hub, responsibility for partitions will be shared between them. * <p> * Subsequent calls to start will be ignored if this event processor is already running. 
Calling start after {@link * * </p> * * <p><strong>Starting the processor to consume events from all partitions</strong></p> * {@codesnippet com.azure.messaging.eventhubs.eventprocessor.startstop} */ public synchronized void start() { if (!started.compareAndSet(false, true)) { logger.info("Event processor is already running"); return; } logger.info("Starting a new event processor instance with id {}", this.identifier); scheduler = Schedulers.newElastic("EventProcessor"); runner = scheduler.schedulePeriodically(this::run, INITIAL_DELAY, INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } /** * Stops processing events for all partitions owned by this event processor. All {@link PartitionProcessor} will be * shutdown and any open resources will be closed. * <p> * Subsequent calls to stop will be ignored if the event processor is not running. * </p> * * <p><strong>Stopping the processor</strong></p> * {@codesnippet com.azure.messaging.eventhubs.eventprocessor.startstop} */ public synchronized void stop() { if (!started.compareAndSet(true, false)) { logger.info("Event processor has already stopped"); return; } this.partitionConsumers.forEach((key, value) -> { try { logger.info("Closing event hub consumer for partition {}", key); value.close(); logger.info("Closed event hub consumer for partition {}", key); partitionConsumers.remove(key); } catch (IOException ex) { logger.warning("Unable to close event hub consumer for partition {}", key); } }); runner.dispose(); scheduler.dispose(); } /* * A simple implementation of an event processor that: * 1. Fetches all partition ids from Event Hub * 2. Gets the current ownership information of all the partitions from PartitionManager * 3. Claims ownership of any partition that doesn't have an owner yet. * 4. 
Starts a new PartitionProcessor and receives events from each of the partitions this instance owns */ private void run() { /* This will run periodically to get new ownership details and close/open new consumers when ownership of this instance has changed */ final Flux<PartitionOwnership> ownershipFlux = partitionManager.listOwnership(eventHubName, consumerGroupName) .cache(); eventHubAsyncClient.getPartitionIds() .flatMap(id -> getCandidatePartitions(ownershipFlux, id)) .flatMap(this::claimOwnership) .subscribe(this::receiveEvents, ex -> logger.warning("Failed to receive events {}", ex.getMessage()), () -> logger.info("Completed starting partition pumps for new partitions owned")); } /* * Get the candidate partitions for claiming ownerships */ private Publisher<? extends PartitionOwnership> getCandidatePartitions(Flux<PartitionOwnership> ownershipFlux, String id) { return ownershipFlux .filter(ownership -> id.equals(ownership.partitionId())) .single(new PartitionOwnership() .partitionId(id) .eventHubName(this.eventHubName) .ownerId(this.identifier) .consumerGroupName(this.consumerGroupName) .ownerLevel(0L)); } /* * Claim ownership of the given partition if it's available */ private Publisher<? extends PartitionOwnership> claimOwnership(PartitionOwnership ownershipInfo) { if (ownershipInfo.lastModifiedTime() == null || (System.currentTimeMillis() - ownershipInfo.lastModifiedTime() > OWNERSHIP_EXPIRATION_TIME_IN_MILLIS && !ownershipInfo.ownerId().equals(this.identifier))) { ownershipInfo.ownerId(this.identifier); return partitionManager.claimOwnership(ownershipInfo).doOnComplete(() -> { logger.info("Claimed ownership of partition {}", ownershipInfo.partitionId()); }).doOnError(error -> { logger.error("Unable to claim ownership of partition {}", ownershipInfo.partitionId(), error); }); } else { return Flux.empty(); } } /* * Creates a new consumer for given partition and starts receiving events for that partition. 
*/ /* * Starts a new process tracing span and attached context the EventData object for users. */ private Context startProcessTracingSpan(EventData eventData) { Object diagnosticId = eventData.properties().get(DIAGNOSTIC_ID_KEY); if (diagnosticId == null || !tracerProvider.isEnabled()) { return Context.NONE; } Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE); return tracerProvider.startSpan(spanContext, ProcessKind.PROCESS); } /* * Ends the process tracing span and the scope of that span. */ private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) { Optional<Object> spanScope = processSpanContext.getData("scope"); if (!spanScope.isPresent() || !tracerProvider.isEnabled()) { return; } if (spanScope.get() instanceof Closeable) { Closeable close = (Closeable) processSpanContext.getData("scope").get(); try { close.close(); tracerProvider.endSpan(processSpanContext, signal); } catch (IOException ioException) { logger.error("EventProcessor.run() endTracingSpan().close() failed with an error %s", ioException); } } else { logger.warning(String.format(Locale.US, "Process span scope type is not of type Closeable, but type: %s. Not closing the scope and span", spanScope.get() != null ? spanScope.getClass() : "null")); } } }
Is there a spec to look at for this?
public static String getDiagnosticId(SpanContext spanContext) { char[] chars = new char[55]; chars[0] = '0'; chars[1] = '0'; chars[2] = '-'; spanContext.getTraceId().copyLowerBase16To(chars, 3); chars[35] = '-'; spanContext.getSpanId().copyLowerBase16To(chars, 36); chars[52] = '-'; spanContext.getTraceOptions().copyLowerBase16To(chars, 53); return new String(chars); }
char[] chars = new char[55];
public static String getDiagnosticId(SpanContext spanContext) { char[] chars = new char[55]; chars[0] = '0'; chars[1] = '0'; chars[2] = '-'; spanContext.getTraceId().copyLowerBase16To(chars, 3); chars[35] = '-'; spanContext.getSpanId().copyLowerBase16To(chars, 36); chars[52] = '-'; spanContext.getTraceOptions().copyLowerBase16To(chars, 53); return new String(chars); }
class AmqpPropagationFormatUtil { private static final String SPAN_CONTEXT = Tracer.OPENTELEMETRY_AMQP_EVENT_SPAN_CONTEXT; private AmqpPropagationFormatUtil() { } /** * This method is called to extract the Span Context information from the received event's diagnostic Id. * * @param diagnosticId The diagnostic Id for the event. * @return {@link Context} */ public static Context extractContext(String diagnosticId) { return new Context(SPAN_CONTEXT, fromDiagnosticId(diagnosticId)); } /** * Parse span context and get the diagnostic Id for that event. * * @param spanContext the span context. * @retrun The dignostic Id */ private static SpanContext fromDiagnosticId(String diagnosticId) { if (diagnosticId == null || diagnosticId.length() < 55 || !diagnosticId.startsWith("00")) { return SpanContext.create(TraceId.INVALID, SpanId.INVALID, TraceOptions.DEFAULT, Tracestate.builder().build()); } return SpanContext.create( TraceId.fromLowerBase16(diagnosticId, 3), SpanId.fromLowerBase16(diagnosticId, 36), TraceOptions.fromLowerBase16(diagnosticId, 53), Tracestate.builder().build()); } }
class AmqpPropagationFormatUtil { private AmqpPropagationFormatUtil() { } /** * This method is called to extract the Span Context information from the received event's diagnostic Id. * * @param diagnosticId The dignostic Id providing an unique identifier for individual traces and requests * @return {@link Context} which contains the trace context propagation data */ public static Context extractContext(String diagnosticId, Context context) { return context.addData(SPAN_CONTEXT, fromDiagnosticId(diagnosticId)); } /** * The traceparent HTTP header field identifies the incoming request in a tracing system with four fields: * version, trace-id, parent-id, trace-flags. * * Please refer to the <a href=https: * for more information on the conversion of these fields to Span Context format. * * @param spanContext is a specification defines an agreed-upon format for the exchange of trace context propagation data * @retrun The dignostic Id providing an unique identifier for individual traces and requests, * allowing trace data of multiple providers to be linked together. */ /** * The traceparent HTTP header field identifies the incoming request in a tracing system with four fields: * version, trace-id, parent-id, trace-flags. * * Please refer to the <a href=https: * for more information on the conversion of these fields to Span Context format. 
* * @param diagnosticId provides a unique identifier for individual traces and requests, * @return SpanContext is a specification defines an agreed-upon format for the exchange of trace context propagation data */ private static SpanContext fromDiagnosticId(String diagnosticId) { if (diagnosticId == null || diagnosticId.length() < 55 || !diagnosticId.startsWith("00")) { return SpanContext.create(TraceId.INVALID, SpanId.INVALID, TraceOptions.DEFAULT, Tracestate.builder().build()); } return SpanContext.create( TraceId.fromLowerBase16(diagnosticId, 3), SpanId.fromLowerBase16(diagnosticId, 36), TraceOptions.fromLowerBase16(diagnosticId, 53), Tracestate.builder().build()); } }
can you please leave a TODO here (or where we call this method) that once we switch to OpenTelemetry, we need to add links before span is starter (and no link type - parent - is needed)
public void addLink(Context eventContext) { Optional<Object> spanContextOptional = eventContext.getData(SPAN_CONTEXT); Optional<Object> spanOptional = eventContext.getData(OPENTELEMETRY_SPAN_KEY); if (!spanOptional.isPresent()) { logger.warning("Failed to find span to link it."); return; } SpanContext spanContext = (SpanContext) spanContextOptional.get(); Span span = (Span) spanOptional.get(); span.addLink(Link.fromSpanContext(spanContext, PARENT_LINKED_SPAN)); }
span.addLink(Link.fromSpanContext(spanContext, PARENT_LINKED_SPAN));
public void addLink(Context eventContext) { final Span span = getSpan(eventContext); if (span == null) { logger.warning("Failed to find span to link it."); return; } final Optional<Object> spanContextOptional = eventContext.getData(SPAN_CONTEXT); if (!spanContextOptional.isPresent()) { logger.warning("Failed to find Span context to link it."); return; } else if (!(spanContextOptional.get() instanceof SpanContext)) { logger.warning("Context in event is not of type SpanContext. Actual: {}", spanContextOptional.get().getClass()); return; } final SpanContext spanContext = (SpanContext) spanContextOptional.get(); span.addLink(Link.fromSpanContext(spanContext, PARENT_LINKED_SPAN)); }
class OpenTelemetryTracer implements com.azure.core.implementation.tracing.Tracer { private static final Tracer TRACER = Tracing.getTracer(); private static final String OPENTELEMETRY_SPAN_KEY = com.azure.core.implementation.tracing.Tracer.OPENTELEMETRY_SPAN_KEY; private static final String OPENTELEMETRY_SPAN_NAME_KEY = com.azure.core.implementation.tracing.Tracer.OPENTELEMETRY_SPAN_NAME_KEY; private static final String DIAGNOSTIC_ID = com.azure.core.implementation.tracing.Tracer.OPENTELEMETRY_DIAGNOSTIC_ID_KEY; private static final String ENTITY_PATH = com.azure.core.implementation.tracing.Tracer.OPENTELEMETRY_AMQP_ENTITY_PATH; private static final String HOSTNAME = com.azure.core.implementation.tracing.Tracer.OPENTELEMETRY_AMQP_HOST_NAME; private static final String SPAN_CONTEXT = com.azure.core.implementation.tracing.Tracer.OPENTELEMETRY_AMQP_EVENT_SPAN_CONTEXT; private static final String COMPONENT = "component"; private static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; private static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class); @Override public Context start(String spanName, Context context) { Span span = startSpanWithExplicitParent(spanName, context); if (context.getData(ENTITY_PATH).isPresent() && span.getOptions().contains(Span.Options.RECORD_EVENTS)) { addSpanRequestAttributes(span, context, spanName); } else { context = setContextData(span); } return context.addData(OPENTELEMETRY_SPAN_KEY, span); } @Override public Context startScopedSpan(String spanName, Context context) { Span span; if (context.getData(SPAN_CONTEXT).isPresent()) { span = startSpanWithRemoteParent(spanName, context); } else { span = startSpanWithExplicitParent(spanName, context); } return context.addData(OPENTELEMETRY_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } @Override public void end(int responseCode, Throwable throwable, Context context) { Optional<Object> 
spanOptional = context.getData(OPENTELEMETRY_SPAN_KEY); if (!spanOptional.isPresent()) { logger.warning("Failed to find span to end it."); return; } Span span = (Span) spanOptional.get(); if (span.getOptions().contains(Options.RECORD_EVENTS)) { span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); } span.end(); } @Override public void setAttribute(String key, String value, Context context) { if (ImplUtils.isNullOrEmpty(value)) { logger.info("Failed to set span attribute since value is null or empty."); return; } Optional<Object> spanOptional = context.getData(OPENTELEMETRY_SPAN_KEY); if (spanOptional.isPresent()) { Span span = (Span) spanOptional.get(); span.putAttribute(key, AttributeValue.stringAttributeValue(value)); } else { logger.warning("Failed to find span to add attribute."); } } @Override public Context setSpanName(String spanName, Context context) { return context.addData(OPENTELEMETRY_SPAN_NAME_KEY, spanName); } @Override public void end(String errorCondition, Throwable throwable, Context context) { Optional<Object> spanOptional = context.getData(OPENTELEMETRY_SPAN_KEY); if (!spanOptional.isPresent()) { logger.warning("Failed to find span to end it."); return; } Span span = (Span) spanOptional.get(); if (span.getOptions().contains(Options.RECORD_EVENTS)) { span.setStatus(AmqpTraceUtil.parseErrorCondition(errorCondition, throwable)); } span.end(); } @Override @Override public Context extractContext(String diagnosticId) { return AmqpPropagationFormatUtil.extractContext(diagnosticId); } private Span startSpanWithExplicitParent(String spanName, Context context) { Span parentSpan = (Span) context.getData(OPENTELEMETRY_SPAN_KEY).orElse(TRACER.getCurrentSpan()); String spanNameKey = (String) context.getData(OPENTELEMETRY_SPAN_NAME_KEY).orElse(spanName); SpanBuilder spanBuilder = TRACER.spanBuilderWithExplicitParent(spanNameKey, parentSpan); return spanBuilder.startSpan(); } private Span startSpanWithRemoteParent(String spanName, Context 
context) { SpanBuilder spanBuilder = TRACER.spanBuilderWithRemoteParent(spanName, (SpanContext) context.getData(SPAN_CONTEXT).get()); return spanBuilder.startSpan(); } private Context setContextData(Span span) { final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(span.getContext()); Context parentContext = new Context(DIAGNOSTIC_ID, traceparent).addData(SPAN_CONTEXT, span.getContext()); return parentContext; } private static void addSpanRequestAttributes(Span span, Context context, String spanName) { span.putAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.putAttribute(MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(context.getData(ENTITY_PATH).get().toString())); span.putAttribute(PEER_ENDPOINT, AttributeValue.stringAttributeValue(context.getData(HOSTNAME).get().toString())); } private static String parseComponentValue(String spanName) { return spanName.substring(spanName.indexOf(".") + 1, spanName.lastIndexOf(".")); } }
class OpenTelemetryTracer implements com.azure.core.implementation.tracing.Tracer { private static final Tracer TRACER = Tracing.getTracer(); private static final String COMPONENT = "component"; private static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; private static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class); @Override public Context start(String methodName, Context context) { Span parentSpan = (Span) context.getData(OPENTELEMETRY_SPAN_KEY).orElse(TRACER.getCurrentSpan()); String spanName = (String) context.getData(OPENTELEMETRY_SPAN_NAME_KEY).orElse(methodName); SpanBuilder spanBuilder = TRACER.spanBuilderWithExplicitParent(spanName, parentSpan); Span span = spanBuilder.startSpan(); return context.addData(OPENTELEMETRY_SPAN_KEY, span); } @Override public Context start(String spanName, Context context, ProcessKind processKind) { Span span; SpanBuilder spanBuilder; switch (processKind) { case SEND: spanBuilder = startSpanWithExplicitParent(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.CLIENT).startSpan(); if (span.getOptions().contains(Span.Options.RECORD_EVENTS)) { addSpanRequestAttributes(span, context, spanName); } return context.addData(OPENTELEMETRY_SPAN_KEY, span); case RECEIVE: spanBuilder = startSpanWithExplicitParent(spanName, context); span = spanBuilder.startSpan(); context = setContextData(span); return context.addData(OPENTELEMETRY_SPAN_KEY, span); case PROCESS: return startScopedSpan(spanName, context); default: return Context.NONE; } } @Override public void end(int responseCode, Throwable throwable, Context context) { final Span span = getSpan(context); if (span == null) { return; } if (span.getOptions().contains(Options.RECORD_EVENTS)) { span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); } span.end(); } @Override public void setAttribute(String key, String value, Context context) { if 
(ImplUtils.isNullOrEmpty(value)) { logger.info("Failed to set span attribute since value is null or empty."); return; } final Span span = getSpan(context); if (span != null) { span.putAttribute(key, AttributeValue.stringAttributeValue(value)); } else { logger.warning("Failed to find span to add attribute."); } } @Override public Context setSpanName(String spanName, Context context) { return context.addData(OPENTELEMETRY_SPAN_NAME_KEY, spanName); } @Override public void end(String statusMessage, Throwable throwable, Context context) { final Span span = getSpan(context); if (span == null) { logger.warning("Failed to find span to end it."); return; } if (span.getOptions().contains(Options.RECORD_EVENTS)) { span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable)); } span.end(); } @Override @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } private Context startScopedSpan(String spanName, Context context) { Span span; Optional<Object> optionalSpanContext = context.getData(SPAN_CONTEXT); if (optionalSpanContext.isPresent() && optionalSpanContext.get() instanceof SpanContext) { span = startSpanWithRemoteParent(spanName, (SpanContext) optionalSpanContext.get()); } else { SpanBuilder spanBuilder = startSpanWithExplicitParent(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan(); } return context.addData(OPENTELEMETRY_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } private SpanBuilder startSpanWithExplicitParent(String spanName, Context context) { Optional<Object> optionalSpanKey = context.getData(OPENTELEMETRY_SPAN_KEY); Optional<Object> optionalSpanNameKey = context.getData(OPENTELEMETRY_SPAN_NAME_KEY); Span parentSpan = null; String spanNameKey = null; if (optionalSpanKey.get() instanceof Span && optionalSpanNameKey.get() instanceof String) { parentSpan = (Span) 
context.getData(OPENTELEMETRY_SPAN_KEY).orElse(TRACER.getCurrentSpan()); spanNameKey = (String) context.getData(OPENTELEMETRY_SPAN_NAME_KEY).orElse(spanName); } else { logger.warning(String.format(Locale.US, "Parent span type is not of type Span, but type: %s. Failed to add span links.", optionalSpanKey.get() != null ? optionalSpanKey.get().getClass() : "null")); } SpanBuilder spanBuilder = TRACER.spanBuilderWithExplicitParent(spanNameKey, parentSpan); return spanBuilder; } private Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) { SpanBuilder spanBuilder = TRACER.spanBuilderWithRemoteParent(spanName, spanContext); spanBuilder.setSpanKind(Span.Kind.SERVER); return spanBuilder.startSpan(); } private Context setContextData(Span span) { final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(span.getContext()); Context parentContext = new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT, span.getContext()); return parentContext; } private static void addSpanRequestAttributes(Span span, Context context, String spanName) { if (context.getData(ENTITY_PATH).isPresent() && context.getData(HOST_NAME).isPresent()) { span.putAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.putAttribute(MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(context.getData(ENTITY_PATH).get().toString())); span.putAttribute(PEER_ENDPOINT, AttributeValue.stringAttributeValue(context.getData(HOST_NAME).get().toString())); } } private static String parseComponentValue(String spanName) { return spanName.substring(spanName.indexOf(".") + 1, spanName.lastIndexOf(".")); } /** * Extracts a {@link Span} from the given {@code context}. * * @param context The context containing the span. * @return The {@link Span} contained in the context, and {@code null} if it does not. 
*/ private Span getSpan(Context context) { final Optional<Object> spanOptional = context.getData(OPENTELEMETRY_SPAN_KEY); if (!spanOptional.isPresent()) { logger.warning("Failed to find span in the context."); return null; } final Object value = spanOptional.get(); if (!(value instanceof Span)) { logger.warning("Could not extract span. Data in {} is not of type Span. Actual class: {}", OPENTELEMETRY_SPAN_KEY, value.getClass()); return null; } return (Span) value; } }
This is the spec - https://gist.github.com/lmolkova/e4215c0f44a49ef824983382762e6b92#file-minimal_instrumentation-md
/**
 * Builds the diagnostic Id for a span in the W3C traceparent format:
 * "00-&lt;trace-id&gt;-&lt;span-id&gt;-&lt;trace-flags&gt;", 55 characters total
 * (2-char version + 3 separators + 32-char trace id + 16-char span id + 2-char flags).
 *
 * @param spanContext the span context to serialize.
 * @return the traceparent-formatted diagnostic Id.
 */
public static String getDiagnosticId(SpanContext spanContext) {
    char[] buffer = new char[55];
    // Version prefix "00" followed by the first field separator.
    buffer[0] = '0';
    buffer[1] = '0';
    buffer[2] = '-';
    // 32 lower-hex chars of trace id at offset 3; separator at 35.
    spanContext.getTraceId().copyLowerBase16To(buffer, 3);
    buffer[35] = '-';
    // 16 lower-hex chars of span id at offset 36; separator at 52.
    spanContext.getSpanId().copyLowerBase16To(buffer, 36);
    buffer[52] = '-';
    // 2 lower-hex chars of trace options at offset 53.
    spanContext.getTraceOptions().copyLowerBase16To(buffer, 53);
    return String.valueOf(buffer);
}
// 55 chars total: "00" version (2) + 3 separators + 32-char trace id + 16-char span id + 2-char trace flags.
char[] chars = new char[55];
/**
 * Builds the diagnostic Id for a span in the W3C traceparent format:
 * "00-&lt;trace-id&gt;-&lt;span-id&gt;-&lt;trace-flags&gt;", 55 characters total.
 *
 * @param spanContext the span context to serialize.
 * @return the traceparent-formatted diagnostic Id.
 */
public static String getDiagnosticId(SpanContext spanContext) {
    char[] chars = new char[55];
    // Version "00" followed by a separator.
    chars[0] = '0';
    chars[1] = '0';
    chars[2] = '-';
    // 32 lower-hex chars of trace id at offset 3; separator at 35.
    spanContext.getTraceId().copyLowerBase16To(chars, 3);
    chars[35] = '-';
    // 16 lower-hex chars of span id at offset 36; separator at 52.
    spanContext.getSpanId().copyLowerBase16To(chars, 36);
    chars[52] = '-';
    // 2 lower-hex chars of trace options at offset 53.
    spanContext.getTraceOptions().copyLowerBase16To(chars, 53);
    return new String(chars);
}
/**
 * Utility for converting between AMQP trace propagation data (diagnostic Ids in the
 * W3C traceparent format) and {@link SpanContext} instances.
 */
class AmqpPropagationFormatUtil {
    private static final String SPAN_CONTEXT = Tracer.OPENTELEMETRY_AMQP_EVENT_SPAN_CONTEXT;

    // Static utility class; not meant to be instantiated.
    private AmqpPropagationFormatUtil() {
    }

    /**
     * This method is called to extract the Span Context information from the received event's diagnostic Id.
     *
     * @param diagnosticId The diagnostic Id for the event.
     * @return {@link Context} holding the parsed {@link SpanContext} under the SPAN_CONTEXT key.
     */
    public static Context extractContext(String diagnosticId) {
        return new Context(SPAN_CONTEXT, fromDiagnosticId(diagnosticId));
    }

    /**
     * Parses a diagnostic Id of the form "00-&lt;trace-id&gt;-&lt;span-id&gt;-&lt;trace-flags&gt;"
     * (W3C traceparent layout) into a {@link SpanContext}.
     *
     * @param diagnosticId The diagnostic Id carried on the received event.
     * @return The parsed span context; an INVALID span context when the Id is null, shorter than
     *     the 55-character traceparent layout, or missing the "00" version prefix.
     */
    private static SpanContext fromDiagnosticId(String diagnosticId) {
        if (diagnosticId == null || diagnosticId.length() < 55 || !diagnosticId.startsWith("00")) {
            return SpanContext.create(TraceId.INVALID, SpanId.INVALID, TraceOptions.DEFAULT, Tracestate.builder().build());
        }
        // Field offsets follow the traceparent layout: trace id at 3, span id at 36, flags at 53.
        return SpanContext.create(
            TraceId.fromLowerBase16(diagnosticId, 3),
            SpanId.fromLowerBase16(diagnosticId, 36),
            TraceOptions.fromLowerBase16(diagnosticId, 53),
            Tracestate.builder().build());
    }
}
/**
 * Utility for converting between AMQP trace propagation data (diagnostic Ids in the
 * W3C traceparent format) and {@link SpanContext} instances.
 */
class AmqpPropagationFormatUtil {

    // Static utility class; not meant to be instantiated.
    private AmqpPropagationFormatUtil() {
    }

    /**
     * This method is called to extract the Span Context information from the received event's diagnostic Id.
     *
     * @param diagnosticId The diagnostic Id providing a unique identifier for individual traces and requests.
     * @param context The context to add the extracted span context to.
     * @return {@link Context} which contains the trace context propagation data.
     */
    public static Context extractContext(String diagnosticId, Context context) {
        // NOTE(review): SPAN_CONTEXT is not declared in this excerpt — presumably a constant
        // defined elsewhere (e.g. on Tracer) or a static import; confirm against the full file.
        return context.addData(SPAN_CONTEXT, fromDiagnosticId(diagnosticId));
    }

    /**
     * The traceparent header identifies an incoming request in a tracing system with four fields:
     * version, trace-id, parent-id, trace-flags ("00-&lt;trace-id&gt;-&lt;span-id&gt;-&lt;trace-flags&gt;").
     * Please refer to the W3C Trace Context specification (https://www.w3.org/TR/trace-context/)
     * for more information on the conversion of these fields to the Span Context format.
     *
     * @param diagnosticId provides a unique identifier for individual traces and requests,
     *     allowing trace data of multiple providers to be linked together.
     * @return The parsed {@link SpanContext}; an INVALID span context when the Id is null, shorter
     *     than the 55-character traceparent layout, or missing the "00" version prefix.
     */
    private static SpanContext fromDiagnosticId(String diagnosticId) {
        if (diagnosticId == null || diagnosticId.length() < 55 || !diagnosticId.startsWith("00")) {
            return SpanContext.create(TraceId.INVALID, SpanId.INVALID, TraceOptions.DEFAULT, Tracestate.builder().build());
        }
        // Field offsets follow the traceparent layout: trace id at 3, span id at 36, flags at 53.
        return SpanContext.create(
            TraceId.fromLowerBase16(diagnosticId, 3),
            SpanId.fromLowerBase16(diagnosticId, 36),
            TraceOptions.fromLowerBase16(diagnosticId, 53),
            Tracestate.builder().build());
    }
}
Can we add documentation in the javadocs how this gets created? A year from now, someone else looking at this would have no idea.
/**
 * Builds the diagnostic Id for a span in the W3C traceparent format:
 * "00-&lt;trace-id&gt;-&lt;span-id&gt;-&lt;trace-flags&gt;", 55 characters total.
 *
 * @param spanContext the span context to serialize.
 * @return the traceparent-formatted diagnostic Id.
 */
public static String getDiagnosticId(SpanContext spanContext) {
    char[] chars = new char[55];
    // Version "00" followed by a separator.
    chars[0] = '0';
    chars[1] = '0';
    chars[2] = '-';
    // 32 lower-hex chars of trace id at offset 3; separator at 35.
    spanContext.getTraceId().copyLowerBase16To(chars, 3);
    chars[35] = '-';
    // 16 lower-hex chars of span id at offset 36; separator at 52.
    spanContext.getSpanId().copyLowerBase16To(chars, 36);
    chars[52] = '-';
    // 2 lower-hex chars of trace options at offset 53.
    spanContext.getTraceOptions().copyLowerBase16To(chars, 53);
    return new String(chars);
}
// 55 chars total: "00" version (2) + 3 separators + 32-char trace id + 16-char span id + 2-char trace flags.
char[] chars = new char[55];
/**
 * Builds the diagnostic Id for a span in the W3C traceparent format:
 * "00-&lt;trace-id&gt;-&lt;span-id&gt;-&lt;trace-flags&gt;", 55 characters total.
 *
 * @param spanContext the span context to serialize.
 * @return the traceparent-formatted diagnostic Id.
 */
public static String getDiagnosticId(SpanContext spanContext) {
    char[] chars = new char[55];
    // Version "00" followed by a separator.
    chars[0] = '0';
    chars[1] = '0';
    chars[2] = '-';
    // 32 lower-hex chars of trace id at offset 3; separator at 35.
    spanContext.getTraceId().copyLowerBase16To(chars, 3);
    chars[35] = '-';
    // 16 lower-hex chars of span id at offset 36; separator at 52.
    spanContext.getSpanId().copyLowerBase16To(chars, 36);
    chars[52] = '-';
    // 2 lower-hex chars of trace options at offset 53.
    spanContext.getTraceOptions().copyLowerBase16To(chars, 53);
    return new String(chars);
}
/**
 * Utility for converting between AMQP trace propagation data (diagnostic Ids in the
 * W3C traceparent format) and {@link SpanContext} instances.
 */
class AmqpPropagationFormatUtil {
    private static final String SPAN_CONTEXT = Tracer.OPENTELEMETRY_AMQP_EVENT_SPAN_CONTEXT;

    // Static utility class; not meant to be instantiated.
    private AmqpPropagationFormatUtil() {
    }

    /**
     * This method is called to extract the Span Context information from the received event's diagnostic Id.
     *
     * @param diagnosticId The diagnostic Id for the event.
     * @return {@link Context} holding the parsed {@link SpanContext} under the SPAN_CONTEXT key.
     */
    public static Context extractContext(String diagnosticId) {
        return new Context(SPAN_CONTEXT, fromDiagnosticId(diagnosticId));
    }

    /**
     * Parses a diagnostic Id of the form "00-&lt;trace-id&gt;-&lt;span-id&gt;-&lt;trace-flags&gt;"
     * (W3C traceparent layout) into a {@link SpanContext}.
     *
     * @param diagnosticId The diagnostic Id carried on the received event.
     * @return The parsed span context; an INVALID span context when the Id is null, shorter than
     *     the 55-character traceparent layout, or missing the "00" version prefix.
     */
    private static SpanContext fromDiagnosticId(String diagnosticId) {
        if (diagnosticId == null || diagnosticId.length() < 55 || !diagnosticId.startsWith("00")) {
            return SpanContext.create(TraceId.INVALID, SpanId.INVALID, TraceOptions.DEFAULT, Tracestate.builder().build());
        }
        // Field offsets follow the traceparent layout: trace id at 3, span id at 36, flags at 53.
        return SpanContext.create(
            TraceId.fromLowerBase16(diagnosticId, 3),
            SpanId.fromLowerBase16(diagnosticId, 36),
            TraceOptions.fromLowerBase16(diagnosticId, 53),
            Tracestate.builder().build());
    }
}
/**
 * Utility for converting between AMQP trace propagation data (diagnostic Ids in the
 * W3C traceparent format) and {@link SpanContext} instances.
 */
class AmqpPropagationFormatUtil {

    // Static utility class; not meant to be instantiated.
    private AmqpPropagationFormatUtil() {
    }

    /**
     * This method is called to extract the Span Context information from the received event's diagnostic Id.
     *
     * @param diagnosticId The diagnostic Id providing a unique identifier for individual traces and requests.
     * @param context The context to add the extracted span context to.
     * @return {@link Context} which contains the trace context propagation data.
     */
    public static Context extractContext(String diagnosticId, Context context) {
        // NOTE(review): SPAN_CONTEXT is not declared in this excerpt — presumably a constant
        // defined elsewhere (e.g. on Tracer) or a static import; confirm against the full file.
        return context.addData(SPAN_CONTEXT, fromDiagnosticId(diagnosticId));
    }

    /**
     * The traceparent header identifies an incoming request in a tracing system with four fields:
     * version, trace-id, parent-id, trace-flags ("00-&lt;trace-id&gt;-&lt;span-id&gt;-&lt;trace-flags&gt;").
     * Please refer to the W3C Trace Context specification (https://www.w3.org/TR/trace-context/)
     * for more information on the conversion of these fields to the Span Context format.
     *
     * @param diagnosticId provides a unique identifier for individual traces and requests,
     *     allowing trace data of multiple providers to be linked together.
     * @return The parsed {@link SpanContext}; an INVALID span context when the Id is null, shorter
     *     than the 55-character traceparent layout, or missing the "00" version prefix.
     */
    private static SpanContext fromDiagnosticId(String diagnosticId) {
        if (diagnosticId == null || diagnosticId.length() < 55 || !diagnosticId.startsWith("00")) {
            return SpanContext.create(TraceId.INVALID, SpanId.INVALID, TraceOptions.DEFAULT, Tracestate.builder().build());
        }
        // Field offsets follow the traceparent layout: trace id at 3, span id at 36, flags at 53.
        return SpanContext.create(
            TraceId.fromLowerBase16(diagnosticId, 3),
            SpanId.fromLowerBase16(diagnosticId, 36),
            TraceOptions.fromLowerBase16(diagnosticId, 53),
            Tracestate.builder().build());
    }
}
I recall `partitionProcessor.processEvent` is an async call, right? If so, `endScopedTracingSpan(processSpanContext)` happens before the user code returns? Or am I missing something?
/*
 * Creates a new consumer for the given partition and starts receiving events from it,
 * forwarding each event to the user-supplied PartitionProcessor.
 */
private void receiveEvents(PartitionOwnership partitionOwnership) {
    EventHubConsumerOptions consumerOptions = new EventHubConsumerOptions();
    consumerOptions.ownerLevel(0L);
    // Resume from the last checkpointed sequence number; fall back to the configured
    // initial position when this partition has never been checkpointed.
    EventPosition startFromEventPosition = partitionOwnership.sequenceNumber() == null ? this.initialEventPosition
        : EventPosition.fromSequenceNumber(partitionOwnership.sequenceNumber(), false);
    EventHubAsyncConsumer consumer = this.eventHubAsyncClient
        .createConsumer(this.consumerGroupName, partitionOwnership.partitionId(), startFromEventPosition,
            consumerOptions);
    this.partitionConsumers.put(partitionOwnership.partitionId(), consumer);
    PartitionContext partitionContext = new PartitionContext(partitionOwnership.partitionId(), this.eventHubName,
        this.consumerGroupName);
    CheckpointManager checkpointManager = new CheckpointManager(this.identifier, partitionContext,
        this.partitionManager, null);
    logger.info("Subscribing to receive events from partition {}", partitionOwnership.partitionId());
    PartitionProcessor partitionProcessor = this.partitionProcessorFactory
        .createPartitionProcessor(partitionContext, checkpointManager);
    partitionProcessor.initialize().subscribe();
    final AtomicReference<Context> processSpanContext = new AtomicReference<>(Context.NONE);
    consumer.receive().subscribeOn(Schedulers.newElastic("PartitionPump"))
        .subscribe(eventData -> {
            startScopedTracingSpan(eventData, processSpanContext);
            // FIX: processEvent is asynchronous; end the tracing span only once the returned
            // Mono terminates (complete, error, or cancel) instead of immediately after
            // subscribing — otherwise the span closed before the user's processing had run.
            partitionProcessor.processEvent(eventData)
                .doFinally(signalType -> endScopedTracingSpan(processSpanContext))
                .subscribe(unused -> {
                }, partitionProcessor::processError);
        }, partitionProcessor::processError,
            () -> partitionProcessor.close(CloseReason.LOST_PARTITION_OWNERSHIP));
}
partitionProcessor.processEvent(eventData).subscribe(unused -> {
/*
 * Creates a new consumer for the given partition and starts receiving events for that partition.
 * A process tracing span is started per event and ended when the user's (asynchronous)
 * processing signals completion or error.
 */
private void receiveEvents(PartitionOwnership partitionOwnership) {
    EventHubConsumerOptions consumerOptions = new EventHubConsumerOptions();
    consumerOptions.ownerLevel(0L);
    // Resume from the last checkpointed sequence number; fall back to the configured
    // initial position when this partition has never been checkpointed.
    EventPosition startFromEventPosition = partitionOwnership.sequenceNumber() == null ? this.initialEventPosition
        : EventPosition.fromSequenceNumber(partitionOwnership.sequenceNumber(), false);
    EventHubAsyncConsumer consumer = this.eventHubAsyncClient
        .createConsumer(this.consumerGroupName, partitionOwnership.partitionId(), startFromEventPosition,
            consumerOptions);
    this.partitionConsumers.put(partitionOwnership.partitionId(), consumer);
    PartitionContext partitionContext = new PartitionContext(partitionOwnership.partitionId(), this.eventHubName,
        this.consumerGroupName);
    CheckpointManager checkpointManager = new CheckpointManager(this.identifier, partitionContext,
        this.partitionManager, null);
    logger.info("Subscribing to receive events from partition {}", partitionOwnership.partitionId());
    PartitionProcessor partitionProcessor = this.partitionProcessorFactory
        .createPartitionProcessor(partitionContext, checkpointManager);
    partitionProcessor.initialize().subscribe();
    consumer.receive().subscribeOn(Schedulers.newElastic("PartitionPump"))
        .subscribe(eventData -> {
            Context processSpanContext = startProcessTracingSpan(eventData);
            if (processSpanContext.getData(SPAN_CONTEXT).isPresent()) {
                // Expose the span context on the event so user code can create child spans.
                eventData.addContext(SPAN_CONTEXT, processSpanContext);
            }
            // doOnEach fires when processEvent's Mono terminates, so the span is ended
            // only after the user's asynchronous processing has actually run.
            partitionProcessor.processEvent(eventData)
                .doOnEach(signal -> endProcessTracingSpan(processSpanContext, signal))
                .subscribe(unused -> {
                }, partitionProcessor::processError);
        }, partitionProcessor::processError,
            () -> partitionProcessor.close(CloseReason.LOST_PARTITION_OWNERSHIP));
}
/**
 * Event processor that pumps events from all partitions of an Event Hub it can claim
 * ownership of, delegating each partition to a user-supplied {@link PartitionProcessor}.
 */
class EventProcessor {
    private static final long INTERVAL_IN_SECONDS = 10;
    private static final long INITIAL_DELAY = 0;
    private static final long OWNERSHIP_EXPIRATION_TIME_IN_MILLIS = TimeUnit.SECONDS.toMillis(30);
    private final ClientLogger logger = new ClientLogger(EventProcessor.class);
    private final EventHubAsyncClient eventHubAsyncClient;
    private final String consumerGroupName;
    private final EventPosition initialEventPosition;
    private final PartitionProcessorFactory partitionProcessorFactory;
    private final PartitionManager partitionManager;
    private final String identifier;
    // Active consumers keyed by partition id; concurrent because the periodic runner and stop() both touch it.
    private final Map<String, EventHubAsyncConsumer> partitionConsumers = new ConcurrentHashMap<>();
    private final String eventHubName;
    private final AtomicBoolean started = new AtomicBoolean(false);
    private Disposable runner;
    private Scheduler scheduler;

    /**
     * Package-private constructor. Use {@link EventHubClientBuilder} to create an instance.
     *
     * @param eventHubAsyncClient The {@link EventHubAsyncClient}.
     * @param consumerGroupName The consumer group name used in this event processor to consume events.
     * @param partitionProcessorFactory The factory to create new partition processor(s).
     * @param initialEventPosition Initial event position to start consuming events.
     * @param partitionManager The partition manager.
     * @param eventHubName The Event Hub name.
     */
    EventProcessor(EventHubAsyncClient eventHubAsyncClient, String consumerGroupName,
        PartitionProcessorFactory partitionProcessorFactory, EventPosition initialEventPosition,
        PartitionManager partitionManager, String eventHubName) {
        this.eventHubAsyncClient = Objects
            .requireNonNull(eventHubAsyncClient, "eventHubAsyncClient cannot be null");
        this.consumerGroupName = Objects
            .requireNonNull(consumerGroupName, "consumerGroupname cannot be null");
        this.partitionProcessorFactory = Objects
            .requireNonNull(partitionProcessorFactory, "partitionProcessorFactory cannot be null");
        this.partitionManager = Objects
            .requireNonNull(partitionManager, "partitionManager cannot be null");
        this.initialEventPosition = Objects
            .requireNonNull(initialEventPosition, "initialEventPosition cannot be null");
        this.eventHubName = Objects
            .requireNonNull(eventHubName, "eventHubName cannot be null");
        this.identifier = UUID.randomUUID().toString();
        logger.info("The instance ID for this event processors is {}", this.identifier);
    }

    /**
     * The identifier is a unique name given to this event processor instance.
     *
     * @return Identifier for this event processor.
     */
    public String identifier() {
        return this.identifier;
    }

    /**
     * Starts processing of events for all partitions of the Event Hub that this event processor can own, assigning a
     * dedicated {@link PartitionProcessor} to each partition. If there are other Event Processors active for the same
     * consumer group on the Event Hub, responsibility for partitions will be shared between them.
     * <p>
     * Subsequent calls to start will be ignored if this event processor is already running.
     * Calling start after stopping will restart this event processor.
     * </p>
     *
     * <p><strong>Starting the processor to consume events from all partitions</strong></p>
     * {@codesnippet com.azure.messaging.eventhubs.eventprocessor.startstop}
     */
    public synchronized void start() {
        if (!started.compareAndSet(false, true)) {
            logger.info("Event processor is already running");
            return;
        }
        logger.info("Starting a new event processor instance with id {}", this.identifier);
        scheduler = Schedulers.newElastic("EventProcessor");
        // Re-run load balancing periodically so this instance picks up newly available partitions.
        runner = scheduler.schedulePeriodically(this::run, INITIAL_DELAY, INTERVAL_IN_SECONDS, TimeUnit.SECONDS);
    }

    /**
     * Stops processing events for all partitions owned by this event processor. All {@link PartitionProcessor} will be
     * shutdown and any open resources will be closed.
     * <p>
     * Subsequent calls to stop will be ignored if the event processor is not running.
     * </p>
     *
     * <p><strong>Stopping the processor</strong></p>
     * {@codesnippet com.azure.messaging.eventhubs.eventprocessor.startstop}
     */
    public synchronized void stop() {
        if (!started.compareAndSet(true, false)) {
            logger.info("Event processor has already stopped");
            return;
        }
        this.partitionConsumers.forEach((key, value) -> {
            try {
                logger.info("Closing event hub consumer for partition {}", key);
                value.close();
                logger.info("Closed event hub consumer for partition {}", key);
                partitionConsumers.remove(key);
            } catch (IOException ex) {
                logger.warning("Unable to close event hub consumer for partition {}", key);
            }
        });
        runner.dispose();
        scheduler.dispose();
    }

    /*
     * A simple implementation of an event processor that:
     * 1. Fetches all partition ids from Event Hub
     * 2. Gets the current ownership information of all the partitions from PartitionManager
     * 3. Claims ownership of any partition that doesn't have an owner yet.
     * 4. Starts a new PartitionProcessor and receives events from each of the partitions this instance owns
     */
    private void run() {
        /* This will run periodically to get new ownership details and close/open new
        consumers when ownership of this instance has changed */
        final Flux<PartitionOwnership> ownershipFlux = partitionManager.listOwnership(eventHubName, consumerGroupName)
            .cache();
        eventHubAsyncClient.getPartitionIds()
            .flatMap(id -> getCandidatePartitions(ownershipFlux, id))
            .flatMap(this::claimOwnership)
            .subscribe(this::receiveEvents,
                ex -> logger.warning("Failed to receive events {}", ex.getMessage()),
                () -> logger.info("Completed starting partition pumps for new partitions owned"));
    }

    /*
     * Get the candidate partitions for claiming ownerships
     */
    private Publisher<? extends PartitionOwnership> getCandidatePartitions(Flux<PartitionOwnership> ownershipFlux,
        String id) {
        return ownershipFlux
            .filter(ownership -> id.equals(ownership.partitionId()))
            // No existing ownership record for this partition: synthesize a fresh candidate owned by us.
            .single(new PartitionOwnership()
                .partitionId(id)
                .eventHubName(this.eventHubName)
                .ownerId(this.identifier)
                .consumerGroupName(this.consumerGroupName)
                .ownerLevel(0L));
    }

    /*
     * Claim ownership of the given partition if it's available
     */
    private Publisher<? extends PartitionOwnership> claimOwnership(PartitionOwnership ownershipInfo) {
        // Claim when the partition was never owned, or when another owner's claim has expired.
        if (ownershipInfo.lastModifiedTime() == null
            || (System.currentTimeMillis() - ownershipInfo.lastModifiedTime() > OWNERSHIP_EXPIRATION_TIME_IN_MILLIS
            && !ownershipInfo.ownerId().equals(this.identifier))) {
            ownershipInfo.ownerId(this.identifier);
            return partitionManager.claimOwnership(ownershipInfo).doOnComplete(() -> {
                logger.info("Claimed ownership of partition {}", ownershipInfo.partitionId());
            }).doOnError(error -> {
                logger.error("Unable to claim ownership of partition {}", ownershipInfo.partitionId(), error);
            });
        } else {
            return Flux.empty();
        }
    }

    /*
     * Creates a new consumer for given partition and starts receiving events for that partition.
     * NOTE(review): the receiveEvents method itself is not visible in this excerpt.
     */
    /*
     * Starts a new process tracing span and attaches its context to the EventData object for users.
     */
    private void startScopedTracingSpan(EventData eventData, AtomicReference<Context> processSpanContext) {
        Object diagnosticId = eventData.properties().get(DIAGNOSTIC_ID_KEY);
        if (diagnosticId == null) {
            // No upstream trace information on the event; nothing to link to.
            return;
        }
        eventData.context(TraceUtil.extractContext(diagnosticId.toString(), Context.NONE));
        processSpanContext.set(TraceUtil.startScopedSpan("process", eventData.context()));
        eventData.context(processSpanContext.get());
    }

    /*
     * Ends the tracing span and the scope of that span.
     */
    private void endScopedTracingSpan(AtomicReference<Context> processSpanContext) {
        if (!processSpanContext.get().getData("scope").isPresent()) {
            return;
        }
        // The "scope" entry holds the Closeable returned when the scoped span was started.
        Closeable close = (Closeable) processSpanContext.get().getData("scope").get();
        try {
            close.close();
        } catch (IOException ioException) {
            logger.error("EventProcessor.run() endTracingSpan().close() failed with an error %s", ioException);
        }
        TraceUtil.endTracingSpan(processSpanContext.get(), null);
    }
}
class EventProcessor { private static final long INTERVAL_IN_SECONDS = 10; private static final long INITIAL_DELAY = 0; private static final long OWNERSHIP_EXPIRATION_TIME_IN_MILLIS = TimeUnit.SECONDS.toMillis(30); private final ClientLogger logger = new ClientLogger(EventProcessor.class); private final EventHubAsyncClient eventHubAsyncClient; private final String consumerGroupName; private final EventPosition initialEventPosition; private final PartitionProcessorFactory partitionProcessorFactory; private final PartitionManager partitionManager; private final String identifier; private final Map<String, EventHubAsyncConsumer> partitionConsumers = new ConcurrentHashMap<>(); private final String eventHubName; private final TracerProvider tracerProvider; private final AtomicBoolean started = new AtomicBoolean(false); private Disposable runner; private Scheduler scheduler; /** * Package-private constructor. Use {@link EventHubClientBuilder} to create an instance. * @param eventHubAsyncClient The {@link EventHubAsyncClient}. * @param consumerGroupName The consumer group name used in this event processor to consumer events. * @param partitionProcessorFactory The factory to create new partition processor(s). * @param initialEventPosition Initial event position to start consuming events. * @param partitionManager The partition manager. * @param eventHubName The Event Hub name. 
* @param tracerProvider The tracer implementation */ EventProcessor(EventHubAsyncClient eventHubAsyncClient, String consumerGroupName, PartitionProcessorFactory partitionProcessorFactory, EventPosition initialEventPosition, PartitionManager partitionManager, String eventHubName, TracerProvider tracerProvider) { this.eventHubAsyncClient = Objects .requireNonNull(eventHubAsyncClient, "eventHubAsyncClient cannot be null"); this.consumerGroupName = Objects .requireNonNull(consumerGroupName, "consumerGroupname cannot be null"); this.partitionProcessorFactory = Objects .requireNonNull(partitionProcessorFactory, "partitionProcessorFactory cannot be null"); this.partitionManager = Objects .requireNonNull(partitionManager, "partitionManager cannot be null"); this.initialEventPosition = Objects .requireNonNull(initialEventPosition, "initialEventPosition cannot be null"); this.eventHubName = Objects .requireNonNull(eventHubName, "eventHubName cannot be null"); this.tracerProvider = tracerProvider; this.identifier = UUID.randomUUID().toString(); logger.info("The instance ID for this event processors is {}", this.identifier); } /** * The identifier is a unique name given to this event processor instance. * * @return Identifier for this event processor. */ public String identifier() { return this.identifier; } /** * Starts processing of events for all partitions of the Event Hub that this event processor can own, assigning a * dedicated {@link PartitionProcessor} to each partition. If there are other Event Processors active for the same * consumer group on the Event Hub, responsibility for partitions will be shared between them. * <p> * Subsequent calls to start will be ignored if this event processor is already running. 
Calling start after {@link * * </p> * * <p><strong>Starting the processor to consume events from all partitions</strong></p> * {@codesnippet com.azure.messaging.eventhubs.eventprocessor.startstop} */ public synchronized void start() { if (!started.compareAndSet(false, true)) { logger.info("Event processor is already running"); return; } logger.info("Starting a new event processor instance with id {}", this.identifier); scheduler = Schedulers.newElastic("EventProcessor"); runner = scheduler.schedulePeriodically(this::run, INITIAL_DELAY, INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } /** * Stops processing events for all partitions owned by this event processor. All {@link PartitionProcessor} will be * shutdown and any open resources will be closed. * <p> * Subsequent calls to stop will be ignored if the event processor is not running. * </p> * * <p><strong>Stopping the processor</strong></p> * {@codesnippet com.azure.messaging.eventhubs.eventprocessor.startstop} */ public synchronized void stop() { if (!started.compareAndSet(true, false)) { logger.info("Event processor has already stopped"); return; } this.partitionConsumers.forEach((key, value) -> { try { logger.info("Closing event hub consumer for partition {}", key); value.close(); logger.info("Closed event hub consumer for partition {}", key); partitionConsumers.remove(key); } catch (IOException ex) { logger.warning("Unable to close event hub consumer for partition {}", key); } }); runner.dispose(); scheduler.dispose(); } /* * A simple implementation of an event processor that: * 1. Fetches all partition ids from Event Hub * 2. Gets the current ownership information of all the partitions from PartitionManager * 3. Claims ownership of any partition that doesn't have an owner yet. * 4. 
Starts a new PartitionProcessor and receives events from each of the partitions this instance owns */ private void run() { /* This will run periodically to get new ownership details and close/open new consumers when ownership of this instance has changed */ final Flux<PartitionOwnership> ownershipFlux = partitionManager.listOwnership(eventHubName, consumerGroupName) .cache(); eventHubAsyncClient.getPartitionIds() .flatMap(id -> getCandidatePartitions(ownershipFlux, id)) .flatMap(this::claimOwnership) .subscribe(this::receiveEvents, ex -> logger.warning("Failed to receive events {}", ex.getMessage()), () -> logger.info("Completed starting partition pumps for new partitions owned")); } /* * Get the candidate partitions for claiming ownerships */ private Publisher<? extends PartitionOwnership> getCandidatePartitions(Flux<PartitionOwnership> ownershipFlux, String id) { return ownershipFlux .filter(ownership -> id.equals(ownership.partitionId())) .single(new PartitionOwnership() .partitionId(id) .eventHubName(this.eventHubName) .ownerId(this.identifier) .consumerGroupName(this.consumerGroupName) .ownerLevel(0L)); } /* * Claim ownership of the given partition if it's available */ private Publisher<? extends PartitionOwnership> claimOwnership(PartitionOwnership ownershipInfo) { if (ownershipInfo.lastModifiedTime() == null || (System.currentTimeMillis() - ownershipInfo.lastModifiedTime() > OWNERSHIP_EXPIRATION_TIME_IN_MILLIS && !ownershipInfo.ownerId().equals(this.identifier))) { ownershipInfo.ownerId(this.identifier); return partitionManager.claimOwnership(ownershipInfo).doOnComplete(() -> { logger.info("Claimed ownership of partition {}", ownershipInfo.partitionId()); }).doOnError(error -> { logger.error("Unable to claim ownership of partition {}", ownershipInfo.partitionId(), error); }); } else { return Flux.empty(); } } /* * Creates a new consumer for given partition and starts receiving events for that partition. 
*/ /* * Starts a new process tracing span and attached context the EventData object for users. */ private Context startProcessTracingSpan(EventData eventData) { Object diagnosticId = eventData.properties().get(DIAGNOSTIC_ID_KEY); if (diagnosticId == null || !tracerProvider.isEnabled()) { return Context.NONE; } Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE); return tracerProvider.startSpan(spanContext, ProcessKind.PROCESS); } /* * Ends the process tracing span and the scope of that span. */ private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) { Optional<Object> spanScope = processSpanContext.getData("scope"); if (!spanScope.isPresent() || !tracerProvider.isEnabled()) { return; } if (spanScope.get() instanceof Closeable) { Closeable close = (Closeable) processSpanContext.getData("scope").get(); try { close.close(); tracerProvider.endSpan(processSpanContext, signal); } catch (IOException ioException) { logger.error("EventProcessor.run() endTracingSpan().close() failed with an error %s", ioException); } } else { logger.warning(String.format(Locale.US, "Process span scope type is not of type Closeable, but type: %s. Not closing the scope and span", spanScope.get() != null ? spanScope.getClass() : "null")); } } }